Column                    Type      Value range
code                      string    lengths 86 to 54.5k characters
code_codestyle            int64     0 to 371
style_context             string    lengths 87 to 49.2k characters
style_context_codestyle   int64     0 to 349
label                     int64     0 to 1
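Each record below lists its fields in schema order: a code sample, its code_codestyle id, a style_context sample, its style_context_codestyle id, and a binary label. A minimal sketch of loading and inspecting a dataset with this schema via the datasets library, assuming it is published on the Hugging Face Hub; the repository id "org/code-style-pairs" is a hypothetical placeholder, not the dataset's real identifier:

from datasets import load_dataset

# "org/code-style-pairs" is a hypothetical placeholder repository id.
ds = load_dataset("org/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # preview the first 200 characters of the code sample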
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears the lowest set bit
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the parity of each bit in turn."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
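Both counters in the sample above can be cross-checked against Python's built-in popcount, int.bit_count(), available since Python 3.10; a quick sanity check, assuming the functions above are in scope:

# 25 = 0b11001 and 37 = 0b100101 each contain three set bits.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == (25).bit_count() == 3
assert get_set_bits_count_using_modulo_operator(37) == (37).bit_count() == 3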
code_codestyle: 363
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the integer right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter with the most solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
style_context_codestyle: 325
label: 0
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0) -> None:
        """Simple XOR cipher; the key is stored for later use."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        key %= 255  # make sure key is an appropriate size
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        key %= 255  # make sure key is an appropriate size
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        while key > 255:  # make sure key can be any size
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        while key > 255:  # make sure key can be any size
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt", key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt", key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))
# if crypt.encrypt_file("test.txt", key):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")
# if crypt.decrypt_file("encrypt.out", key):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
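A quick live round trip with the class above, mirroring its commented tests; this assumes the class is in scope as XORCipher:

# XOR with the same key twice restores the original text.
crypt = XORCipher()
key = 67
secret = crypt.encrypt_string("hallo welt", key)
assert crypt.decrypt_string(secret, key) == "hallo welt"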
code_codestyle: 364
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
style_context_codestyle: 325
label: 0
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
code_codestyle: 365
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = ['image_processor', 'tokenizer'] __snake_case = 'BlipImageProcessor' __snake_case = 'AutoTokenizer' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__(__lowerCamelCase , __lowerCamelCase ) # add QFormer tokenizer _SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) _SCREAMING_SNAKE_CASE : Any = BatchFeature() if text is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) encoding.update(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" ) _SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" ) if images is not None: _SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase ) encoding.update(__lowerCamelCase ) return encoding def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any: if os.path.isfile(__lowerCamelCase ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__lowerCamelCase ) return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase ) @classmethod def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" ) _SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase ) args.append(__lowerCamelCase ) return cls(*__lowerCamelCase )
style_context_codestyle: 325
label: 0
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase__ (__lowerCamelCase ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> str: super().__init__() _SCREAMING_SNAKE_CASE : Any = module _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Sequential( nn.Linear(module.in_features , __lowerCamelCase , bias=__lowerCamelCase ) , nn.Linear(__lowerCamelCase , module.out_features , bias=__lowerCamelCase ) , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__lowerCamelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def UpperCamelCase_ ( self , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) -> List[Any]: return self.module(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ) + self.adapter(__lowerCamelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' __snake_case = 'bigscience/bloom-1b7' # Constant values __snake_case = 2.109_6595_5269_2574 __snake_case = 'Hello my name is' __snake_case = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) __snake_case = 1_0 def UpperCamelCase_ ( self ) -> List[Any]: # Models and tokenizer _SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained(self.model_name ) class lowerCAmelCase__( __lowercase ): '''simple docstring''' def UpperCamelCase_ ( self ) -> int: super().setUp() # Models and tokenizer _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) _SCREAMING_SNAKE_CASE : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map="auto" ) def UpperCamelCase_ ( self ) -> Any: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : List[str] = self.model_abit.config self.assertTrue(hasattr(__lowerCamelCase , "quantization_config" ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = config.to_dict() _SCREAMING_SNAKE_CASE : int = config.to_diff_dict() _SCREAMING_SNAKE_CASE : List[Any] = config.to_json_string() def UpperCamelCase_ ( self ) -> Optional[Any]: from bitsandbytes.nn import Paramsabit _SCREAMING_SNAKE_CASE : str = self.model_fpaa.get_memory_footprint() _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) _SCREAMING_SNAKE_CASE : List[str] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def UpperCamelCase_ ( self ) -> Dict: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__lowerCamelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def UpperCamelCase_ ( self ) -> Any: _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) _SCREAMING_SNAKE_CASE : List[Any] = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__lowerCamelCase ) , self.EXPECTED_OUTPUTS ) def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : int = BitsAndBytesConfig() _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__lowerCamelCase , device_map="auto" ) _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__lowerCamelCase ) , self.EXPECTED_OUTPUTS ) def UpperCamelCase_ ( self ) -> Union[str, Any]: with self.assertRaises(__lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : str = BitsAndBytesConfig() with self.assertRaises(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , 
quantization_config=__lowerCamelCase , load_in_abit=__lowerCamelCase , device_map="auto" , bnb_abit_quant_type="nf4" , ) def UpperCamelCase_ ( self ) -> str: with self.assertRaises(__lowerCamelCase ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(__lowerCamelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__lowerCamelCase ): # Tries with a `device` self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(__lowerCamelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__lowerCamelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) _SCREAMING_SNAKE_CASE : Any = self.model_fpaa.to(torch.floataa ) _SCREAMING_SNAKE_CASE : int = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 ) # Check this does not throw an error _SCREAMING_SNAKE_CASE : Any = self.model_fpaa.to("cpu" ) # Check this does not throw an error _SCREAMING_SNAKE_CASE : int = self.model_fpaa.half() # Check this does not throw an error _SCREAMING_SNAKE_CASE : Dict = self.model_fpaa.float() def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : Any = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=__lowerCamelCase , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : List[Any] = "t5-small" _SCREAMING_SNAKE_CASE : Union[str, Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense _SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(cls.model_name ) _SCREAMING_SNAKE_CASE : List[Any] = "Translate in German: Hello, my dog is cute" def UpperCamelCase_ ( self ) -> Union[str, Any]: gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ) -> Union[str, Any]: from transformers import TaForConditionalGeneration _SCREAMING_SNAKE_CASE : List[str] = TaForConditionalGeneration._keep_in_fpaa_modules _SCREAMING_SNAKE_CASE : Optional[Any] = None # test with `t5-small` _SCREAMING_SNAKE_CASE : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map="auto" ) _SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) _SCREAMING_SNAKE_CASE : Optional[int] = model.generate(**__lowerCamelCase ) # test with `flan-t5-small` _SCREAMING_SNAKE_CASE : Union[str, Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__lowerCamelCase , device_map="auto" ) _SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) _SCREAMING_SNAKE_CASE : Any = model.generate(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = modules def UpperCamelCase_ ( self ) -> str: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` _SCREAMING_SNAKE_CASE : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) 
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) _SCREAMING_SNAKE_CASE : int = model.generate(**__lowerCamelCase ) # test with `flan-t5-small` _SCREAMING_SNAKE_CASE : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__lowerCamelCase , device_map="auto" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) _SCREAMING_SNAKE_CASE : int = model.generate(**__lowerCamelCase ) class lowerCAmelCase__( __lowercase ): '''simple docstring''' def UpperCamelCase_ ( self ) -> Dict: super().setUp() # model_name _SCREAMING_SNAKE_CASE : str = "bigscience/bloom-560m" _SCREAMING_SNAKE_CASE : List[Any] = "t5-small" # Different types of model _SCREAMING_SNAKE_CASE : int = AutoModel.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map="auto" ) # Sequence classification model _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__lowerCamelCase , device_map="auto" ) # CausalLM model _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase , device_map="auto" ) # Seq2seq model _SCREAMING_SNAKE_CASE : Tuple = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__lowerCamelCase , device_map="auto" ) def UpperCamelCase_ ( self ) -> str: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ) -> List[Any]: from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCAmelCase__( __lowercase ): '''simple docstring''' def UpperCamelCase_ ( self ) -> int: super().setUp() def UpperCamelCase_ ( self ) -> Optional[Any]: del self.pipe gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCAmelCase__( __lowercase ): '''simple docstring''' def UpperCamelCase_ ( self ) -> Optional[int]: super().setUp() def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : int = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__lowerCamelCase , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch _SCREAMING_SNAKE_CASE : Optional[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__lowerCamelCase ) , self.EXPECTED_OUTPUTS ) class lowerCAmelCase__( __lowercase ): '''simple 
docstring''' def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : str = "facebook/opt-350m" super().setUp() def UpperCamelCase_ ( self ) -> int: if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters _SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__lowerCamelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): _SCREAMING_SNAKE_CASE : Tuple = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability _SCREAMING_SNAKE_CASE : Optional[int] = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__lowerCamelCase ) ): _SCREAMING_SNAKE_CASE : List[Any] = LoRALayer(module.q_proj , rank=1_6 ) _SCREAMING_SNAKE_CASE : Any = LoRALayer(module.k_proj , rank=1_6 ) _SCREAMING_SNAKE_CASE : Any = LoRALayer(module.v_proj , rank=1_6 ) # Step 3: dummy batch _SCREAMING_SNAKE_CASE : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): _SCREAMING_SNAKE_CASE : Tuple = model.forward(**__lowerCamelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__lowerCamelCase , __lowerCamelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__lowerCamelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = 'gpt2-xl' __snake_case = 3.3191_8548_5415_2187
code_codestyle: 366
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 are both prime, otherwise -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
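Two sample calls for the function above, assuming maths.prime_check.is_prime behaves as its name suggests:

assert twin_prime(5) == 7   # 5 and 7 are both prime, so (5, 7) is a twin prime pair
assert twin_prime(4) == -1  # 4 is not prime, so there is no pair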
style_context_codestyle: 325
label: 0
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class DatasetScriptsQualityTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find instances where a non-binary file is opened without an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find print statements that are not commented out or inside strings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = regexp.finditer(input_text)
        filtered = [match for match in matches if match is not None and match.group(1) is not None]
        return filtered[0] if filtered else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
code_codestyle: 367
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
style_context_codestyle: 325
label: 0
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: take items by best profit-per-weight ratio."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) and while i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this ratio as used

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight:
            # 1 === weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it:
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
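A small worked call for the function above, checked by hand: the profit-per-weight ratios are 1/3, 1/2, and 3/5, so the greedy order is item 3, item 2, item 1; their combined weight 5 + 4 + 3 = 12 fits inside the 15 kg limit, giving a total profit of 3 + 2 + 1 = 6:

# All three items fit entirely, so the full profits are collected.
assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6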
code_codestyle: 368
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowerCAmelCase__: '''simple docstring''' __snake_case = BlenderbotSmallConfig __snake_case = {} __snake_case = 'gelu' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]: _SCREAMING_SNAKE_CASE : int = parent _SCREAMING_SNAKE_CASE : Tuple = batch_size _SCREAMING_SNAKE_CASE : Dict = seq_length _SCREAMING_SNAKE_CASE : List[str] = is_training _SCREAMING_SNAKE_CASE : List[str] = use_labels _SCREAMING_SNAKE_CASE : Dict = vocab_size _SCREAMING_SNAKE_CASE : Dict = hidden_size _SCREAMING_SNAKE_CASE : int = num_hidden_layers _SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads _SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id _SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id _SCREAMING_SNAKE_CASE : List[str] = bos_token_id def UpperCamelCase_ ( self ) -> List[str]: _SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 ) _SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE : str = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, inputs_dict def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple: _SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder() _SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"] _SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :] _SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :] 
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"] _SCREAMING_SNAKE_CASE : int = 1 # first forward pass _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) _SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx] _SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 ) def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ): if attention_mask is None: _SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: _SCREAMING_SNAKE_CASE : List[str] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: _SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __snake_case = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __snake_case = True __snake_case = False __snake_case = False def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self ) _SCREAMING_SNAKE_CASE : 
Dict = ConfigTester(self , config_class=__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Tuple: self.config_tester.run_common_tests() def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase ) @require_tokenizers @require_tf class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' __snake_case = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' ] __snake_case = 'facebook/blenderbot_small-90M' @cached_property def UpperCamelCase_ ( self ) -> List[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" ) _SCREAMING_SNAKE_CASE : Dict = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
style_context_codestyle: 325
label: 0
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar UpperCamelCase__ =TypeVar('T') class lowerCAmelCase__( Generic[T] ): '''simple docstring''' def __init__( self , __lowerCamelCase = True ) -> None: _SCREAMING_SNAKE_CASE : dict[T, list[T]] = {} # dictionary of lists _SCREAMING_SNAKE_CASE : Any = directed def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> GraphAdjacencyList[T]: if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCamelCase ) self.adj_list[destination_vertex].append(__lowerCamelCase ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: _SCREAMING_SNAKE_CASE : Optional[Any] = [destination_vertex] _SCREAMING_SNAKE_CASE : Dict = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCamelCase ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: _SCREAMING_SNAKE_CASE : Union[str, Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: _SCREAMING_SNAKE_CASE : Union[str, Any] = [destination_vertex] _SCREAMING_SNAKE_CASE : List[Any] = [] return self def __repr__( self ) -> str: return pformat(self.adj_list )
code_codestyle: 369
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return the primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count pairs of distinct primes p < q with p**q * q**p <= base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
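The two-pointer scan above works in log space: p**q * q**p <= n**n exactly when q * log2(p) + p * log2(q) <= n * log2(n), which keeps every quantity within floating-point range instead of forming astronomically large powers. Project Euler problem 800 quotes 10790 such hybrid-integers not exceeding 800**800, which makes a useful smoke test, assuming the function above:

# Reference value quoted in the Project Euler problem 800 statement.
assert solution(800, 800) == 10790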
style_context_codestyle: 325
label: 0
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) UpperCamelCase__ =logging.getLogger() UpperCamelCase__ =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowerCAmelCase__( __lowercase ): '''simple docstring''' def UpperCamelCase_ ( self , __lowerCamelCase ) -> Union[str, Any]: os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"source": "What is love ?", "target": "life"} _SCREAMING_SNAKE_CASE : Union[str, Any] = {"train": 1_2, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: _SCREAMING_SNAKE_CASE : Tuple = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(__lowerCamelCase , F"""{split}.{field}""" ) , "w" ) as f: f.write(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = "pytorch" ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_auto_remove_tmp_dir() _SCREAMING_SNAKE_CASE : int = os.path.join(__lowerCamelCase , "output" ) _SCREAMING_SNAKE_CASE : str = os.path.join(__lowerCamelCase , "data" ) self._create_dummy_data(data_dir=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = F""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(F"""--gpus={gpus}""" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) _SCREAMING_SNAKE_CASE : Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__lowerCamelCase , env=self.get_env() ) _SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "metrics.json" ) with open(__lowerCamelCase ) as f: _SCREAMING_SNAKE_CASE : Tuple = json.load(__lowerCamelCase ) return result @require_torch_gpu def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : Tuple = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : List[str] = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : str = self._run_finetune(gpus=1 , 
distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
code_codestyle: 370
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
style_context_codestyle: 325
label: 0
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build each row from the previous one, mirroring the first half."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
code_codestyle: 371
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class lowerCAmelCase__( __lowercase , __lowercase ): '''simple docstring''' @register_to_config def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int: super().__init__() _SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential( nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , ) _SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList() for lyr_num in range(__lowerCamelCase ): # FiLM conditional T5 decoder _SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase ) self.decoders.append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: _SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to( torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase ) inputs += position_encodings _SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase ) # decoder: No padding present. _SCREAMING_SNAKE_CASE : Any = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks] # cross attend style: concat encodings _SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _SCREAMING_SNAKE_CASE : Optional[Any] = lyr( __lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0] _SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase ) return spec_out class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict: super().__init__() _SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : int = self.layer[0]( __lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , ) if encoder_hidden_states is not None: _SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _SCREAMING_SNAKE_CASE : Tuple = self.layer[1]( __lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , ) # Apply Film Conditional Feed Forward layer _SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase ) return (hidden_states,) class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]: # pre_self_attention_layer_norm _SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase ) if conditioning_emb is not None: _SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase ) # Self-attention block _SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = 
hidden_states + self.dropout(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: super().__init__() _SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]: _SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.attention( __lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase ) return layer_output class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]: _SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase ) if conditioning_emb is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation() def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any: _SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear _SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int: super().__init__() _SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : str = eps def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as 
Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) )) class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 ) _SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift return x
325
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__( __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = GPTaTokenizer __snake_case = GPTaTokenizerFast __snake_case = True __snake_case = {'add_prefix_space': True} __snake_case = False def UpperCamelCase_ ( self ) -> Optional[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _SCREAMING_SNAKE_CASE : Union[str, Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] _SCREAMING_SNAKE_CASE : List[str] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _SCREAMING_SNAKE_CASE : str = {"unk_token": "<unk>"} _SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowerCamelCase ) ) def UpperCamelCase_ ( self , **__lowerCamelCase ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def UpperCamelCase_ ( self , **__lowerCamelCase ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> int: _SCREAMING_SNAKE_CASE : int = "lower newer" _SCREAMING_SNAKE_CASE : str = "lower newer" return input_text, output_text def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : str = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _SCREAMING_SNAKE_CASE : Union[str, Any] = "lower newer" _SCREAMING_SNAKE_CASE : Any = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] _SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize(__lowerCamelCase , add_prefix_space=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = tokens + [tokenizer.unk_token] _SCREAMING_SNAKE_CASE : Union[str, Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def UpperCamelCase_ ( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return _SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() _SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = "lower newer" # Testing tokenization _SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize(__lowerCamelCase , add_prefix_space=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) # Testing conversion to ids without special tokens _SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , 
add_prefix_space=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) # Testing conversion to ids with special tokens _SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = tokenizer.encode(__lowerCamelCase , add_prefix_space=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) # Testing the unknown token _SCREAMING_SNAKE_CASE : Tuple = tokens + [rust_tokenizer.unk_token] _SCREAMING_SNAKE_CASE : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def UpperCamelCase_ ( self , __lowerCamelCase=1_5 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) # Simple input _SCREAMING_SNAKE_CASE : Tuple = "This is a simple input" _SCREAMING_SNAKE_CASE : Dict = ["This is a simple input 1", "This is a simple input 2"] _SCREAMING_SNAKE_CASE : Optional[int] = ("This is a simple input", "This is a pair") _SCREAMING_SNAKE_CASE : int = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) # Simple input self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) # Simple input self.assertRaises( __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , ) # Pair input self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) # Pair input self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) # Pair input self.assertRaises( __lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , ) def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input _SCREAMING_SNAKE_CASE : str = "This is a simple input" _SCREAMING_SNAKE_CASE : int = ["This is a simple input looooooooong", "This is a simple input"] _SCREAMING_SNAKE_CASE : Tuple = ("This is a simple input", "This is a pair") _SCREAMING_SNAKE_CASE : Dict = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] _SCREAMING_SNAKE_CASE : List[Any] = tokenizer.pad_token_id _SCREAMING_SNAKE_CASE : List[str] = tokenizer(__lowerCamelCase , padding="max_length" , max_length=3_0 , return_tensors="np" ) _SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(__lowerCamelCase , 
padding=__lowerCamelCase , truncate=__lowerCamelCase , return_tensors="np" ) _SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(*__lowerCamelCase , padding="max_length" , max_length=6_0 , return_tensors="np" ) _SCREAMING_SNAKE_CASE : Any = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , truncate=__lowerCamelCase , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def UpperCamelCase_ ( self ) -> Any: _SCREAMING_SNAKE_CASE : List[Any] = "$$$" _SCREAMING_SNAKE_CASE : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCamelCase , add_bos_token=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = "This is a simple input" _SCREAMING_SNAKE_CASE : List[Any] = ["This is a simple input 1", "This is a simple input 2"] _SCREAMING_SNAKE_CASE : List[str] = tokenizer.bos_token_id _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = tokenizer(__lowerCamelCase ) self.assertEqual(out_s.input_ids[0] , __lowerCamelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _SCREAMING_SNAKE_CASE : int = tokenizer.decode(out_s.input_ids ) _SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __lowerCamelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def UpperCamelCase_ ( self ) -> str: pass def UpperCamelCase_ ( self ) -> List[Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented _SCREAMING_SNAKE_CASE : List[str] = [self.get_tokenizer(do_lower_case=__lowerCamelCase , add_bos_token=__lowerCamelCase )] for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _SCREAMING_SNAKE_CASE : int = "Encode this." _SCREAMING_SNAKE_CASE : List[Any] = "This one too please." 
_SCREAMING_SNAKE_CASE : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) encoded_sequence += tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = tokenizer.encode_plus( __lowerCamelCase , __lowerCamelCase , add_special_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : Tuple = encoded_sequence_dict["input_ids"] _SCREAMING_SNAKE_CASE : Optional[Any] = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : List[Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(__lowerCamelCase ) ] _SCREAMING_SNAKE_CASE : Dict = [x for x in filtered_sequence if x is not None] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) @require_tokenizers class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ) -> Tuple: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = "A photo of a cat" _SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode( __lowerCamelCase , ) self.assertEqual(__lowerCamelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("test_opt" ) _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("./test_opt" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode( __lowerCamelCase , ) self.assertEqual(__lowerCamelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = "A photo of a cat" _SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode( __lowerCamelCase , ) # Same as above self.assertEqual(__lowerCamelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def UpperCamelCase_ ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = "bos" _SCREAMING_SNAKE_CASE : Dict = tokenizer.get_vocab()["bos"] _SCREAMING_SNAKE_CASE : Optional[int] = "A photo of a cat" _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode( __lowerCamelCase , ) # We changed the bos token self.assertEqual(__lowerCamelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("./tok" ) _SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) _SCREAMING_SNAKE_CASE : Any = tokenizer.encode( __lowerCamelCase , ) self.assertEqual(__lowerCamelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
350
def binomial_coefficient(n: int, r: int) -> int:
    # Compute C(n, r) with a single 1-D dynamic-programming table.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
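# --- Added sanity check (sketch, not from the original file) -----------------
# Cross-checks the DP table against Python's builtin math.comb (Python >= 3.8).
import math

for n in range(10):
    for r in range(n + 1):
        assert binomial_coefficient(n, r) == math.comb(n, r)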
325
0
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = " " ): _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : List[Any] = 0 for index, char in enumerate(__lowerCamelCase ): if char == separator: split_words.append(string[last_index:index] ) _SCREAMING_SNAKE_CASE : List[Any] = index + 1 elif index + 1 == len(__lowerCamelCase ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
351
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever that gathers queries on the main worker, performs the
    retrieval there, and scatters the results back over a dedicated gloo group.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
325
0
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase__ =logging.getLogger(__name__) def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ): return (preds == labels).mean() @dataclass class lowerCAmelCase__: '''simple docstring''' __snake_case = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __snake_case = field( default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __snake_case = field( default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __snake_case = field( default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class lowerCAmelCase__: '''simple docstring''' __snake_case = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) __snake_case = field(metadata={'help': 'Should contain the data files for the task.'} ) __snake_case = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __snake_case = field( default=__lowercase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def lowerCamelCase__ (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _SCREAMING_SNAKE_CASE : List[str] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", __lowerCamelCase ) # Set seed set_seed(training_args.seed ) try: _SCREAMING_SNAKE_CASE : List[Any] = processors[data_args.task_name]() _SCREAMING_SNAKE_CASE : Tuple = processor.get_labels() _SCREAMING_SNAKE_CASE : str = len(__lowerCamelCase ) except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__lowerCamelCase, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) _SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) _SCREAMING_SNAKE_CASE : List[str] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=__lowerCamelCase, cache_dir=model_args.cache_dir, ) # Get datasets _SCREAMING_SNAKE_CASE : Any = ( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=__lowerCamelCase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) _SCREAMING_SNAKE_CASE : int = ( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=__lowerCamelCase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def compute_metrics(__lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = np.argmax(p.predictions, axis=1 ) return {"acc": simple_accuracy(__lowerCamelCase, p.label_ids )} # Data collator _SCREAMING_SNAKE_CASE : List[str] = DataCollatorWithPadding(__lowerCamelCase, pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer _SCREAMING_SNAKE_CASE : List[str] = Trainer( model=__lowerCamelCase, args=__lowerCamelCase, train_dataset=__lowerCamelCase, eval_dataset=__lowerCamelCase, compute_metrics=__lowerCamelCase, data_collator=__lowerCamelCase, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _SCREAMING_SNAKE_CASE : Optional[int] = {} if 
training_args.do_eval: logger.info("*** Evaluate ***" ) _SCREAMING_SNAKE_CASE : str = trainer.evaluate() _SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir, "eval_results.txt" ) if trainer.is_world_master(): with open(__lowerCamelCase, "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s", __lowerCamelCase, __lowerCamelCase ) writer.write("%s = %s\n" % (key, value) ) results.update(__lowerCamelCase ) return results def lowerCamelCase__ (__lowerCamelCase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
352
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
325
0
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
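# --- Added worked example (sketch, not from the original file) ---------------
# A 2x2 matrix makes the three rotations easy to verify by hand:
#   [[1, 2],            [[2, 4],
#    [3, 4]]  --90-->    [1, 3]]   (counterclockwise)
assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]
assert rotate_180([[1, 2], [3, 4]]) == [[4, 3], [2, 1]]
assert rotate_270([[1, 2], [3, 4]]) == [[3, 1], [4, 2]]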
353
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ =logging.get_logger(__name__) UpperCamelCase__ ={'vocab_file': 'spiece.model'} UpperCamelCase__ ={ 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', } } UpperCamelCase__ ={ 'albert-base-v1': 512, 'albert-large-v1': 512, 'albert-xlarge-v1': 512, 'albert-xxlarge-v1': 512, 'albert-base-v2': 512, 'albert-large-v2': 512, 'albert-xlarge-v2': 512, 'albert-xxlarge-v2': 512, } UpperCamelCase__ ='▁' class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_SCREAMING_SNAKE_CASE : List[Any] = ( AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token ) _SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : Dict = do_lower_case _SCREAMING_SNAKE_CASE : List[Any] = remove_space _SCREAMING_SNAKE_CASE : str = keep_accents _SCREAMING_SNAKE_CASE : Optional[int] = vocab_file _SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def UpperCamelCase_ ( self ) -> Optional[Any]: return len(self.sp_model ) def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : str = self.__dict__.copy() _SCREAMING_SNAKE_CASE : Optional[Any] = None return state def __setstate__( self , __lowerCamelCase ) -> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]: if self.remove_space: _SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = inputs _SCREAMING_SNAKE_CASE : str = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: _SCREAMING_SNAKE_CASE : str = unicodedata.normalize("NFKD" , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] ) if self.do_lower_case: _SCREAMING_SNAKE_CASE : Dict = outputs.lower() return outputs def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]: _SCREAMING_SNAKE_CASE : int = self.preprocess_text(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = [] for piece in pieces: if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): _SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[1:] else: _SCREAMING_SNAKE_CASE : Tuple = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCamelCase ) else: new_pieces.append(__lowerCamelCase ) return new_pieces def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]: return self.sp_model.PieceToId(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> str: return self.sp_model.IdToPiece(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : List[str] = "" 
_SCREAMING_SNAKE_CASE : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCamelCase ) + token _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]: _SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] _SCREAMING_SNAKE_CASE : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is not None: return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1] def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]: _SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id] _SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _SCREAMING_SNAKE_CASE : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: _SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
325
0
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ =get_tests_dir('fixtures/test_sentencepiece.model') if is_sentencepiece_available(): import sentencepiece as sp UpperCamelCase__ =5 UpperCamelCase__ =10 @require_sentencepiece @require_tokenizers class lowerCAmelCase__( __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = SpeechaTextTokenizer __snake_case = False __snake_case = True def UpperCamelCase_ ( self ) -> List[str]: super().setUp() _SCREAMING_SNAKE_CASE : Union[str, Any] = sp.SentencePieceProcessor() spm_model.Load(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__lowerCamelCase ) )] _SCREAMING_SNAKE_CASE : str = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) _SCREAMING_SNAKE_CASE : Tuple = Path(self.tmpdirname ) save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : Any = "<pad>" _SCREAMING_SNAKE_CASE : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def UpperCamelCase_ ( self ) -> Any: _SCREAMING_SNAKE_CASE : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__lowerCamelCase ) , 1_0_0_1 ) def UpperCamelCase_ ( self ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1 ) def UpperCamelCase_ ( self ) -> List[str]: _SCREAMING_SNAKE_CASE : Tuple = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) _SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) _SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] ) _SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def UpperCamelCase_ ( self ) -> str: # fmt: off _SCREAMING_SNAKE_CASE : Optional[int] = {"input_ids": [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' __snake_case = 'valhalla/s2t_mustc_multilinguial_medium' __snake_case = 'C\'est trop cool' __snake_case = 'Esto es genial' @classmethod def UpperCamelCase_ ( cls ) -> Dict: _SCREAMING_SNAKE_CASE : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def UpperCamelCase_ ( self ) -> int: self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 1_1 ) def UpperCamelCase_ ( self ) -> List[Any]: self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0 ) def UpperCamelCase_ ( self ) -> Any: self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids ) _SCREAMING_SNAKE_CASE : Optional[Any] = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2] _SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase ) def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : Tuple = "fr" _SCREAMING_SNAKE_CASE : Dict = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , __lowerCamelCase ) self.assertEqual(encoded[-1] , 
self.tokenizer.eos_token_id ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = "fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) _SCREAMING_SNAKE_CASE : List[Any] = "es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
354
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
325
0
def solution(n: int = 1000) -> int:
    # Find the largest product a*b*c for a Pythagorean triplet with a+b+c == n.
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
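# --- Added cross-check (sketch, not from the original file) ------------------
# A brute-force search over (a, b) pairs; slower, but obviously correct, and it
# should agree with the closed-form search above (31875000 for n = 1000).
def solution_brute_force(n: int = 1000) -> int:
    best = -1
    for a in range(1, n // 3):
        for b in range(a, (n - a) // 2 + 1):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best


if __name__ == "__main__":
    assert solution() == solution_brute_force()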
355
import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{'e}sir{'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
325
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase__ ={ 'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ =['LlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ =['LlamaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ =[ 'LlamaForCausalLM', 'LlamaModel', 'LlamaPreTrainedModel', 'LlamaForSequenceClassification', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
356
from __future__ import annotations import math def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): if depth < 0: raise ValueError("Depth cannot be less than 0" ) if len(__lowerCamelCase ) == 0: raise ValueError("Scores cannot be empty" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), ) return min( minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), ) def lowerCamelCase__ (): _SCREAMING_SNAKE_CASE : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 34423] _SCREAMING_SNAKE_CASE : Tuple = math.log(len(__lowerCamelCase ), 2 ) print("Optimal value : ", end="" ) print(minimax(0, 0, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
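# `scores` holds the leaves of a complete binary tree, so math.log(8, 2)
# gives a height of 3.0 (the int/float comparison at the leaf check still
# matches). Even depths maximise and odd depths minimise; assuming the root
# is the maximising player (as in the upstream version of this snippet),
# the printed optimal value for the sample scores is 65.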
325
0
def lowerCamelCase__ (__lowerCamelCase = 1000 ): return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1 ) ) if __name__ == "__main__": print(solution())
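# Closed form for Project Euler 120: the maximum remainder of
# (a - 1)^n + (a + 1)^n modulo a^2 is r_max = 2 * a * floor((a - 1) / 2),
# i.e. a^2 - a for odd a and a^2 - 2a for even a, summed here over
# 3 <= a <= 1000.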
357
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase__ ='src/diffusers' UpperCamelCase__ ='.' # This is to make sure the diffusers module imported is the one in the repo. UpperCamelCase__ =importlib.util.spec_from_file_location( 'diffusers', os.path.join(DIFFUSERS_PATH, '__init__.py'), submodule_search_locations=[DIFFUSERS_PATH], ) UpperCamelCase__ =spec.loader.load_module() def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ): return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None def lowerCamelCase__ (__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Any = object_name.split("." ) _SCREAMING_SNAKE_CASE : List[Any] = 0 # First let's find the module where our object lives. _SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ): i += 1 if i < len(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] ) if i >= len(__lowerCamelCase ): raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f: _SCREAMING_SNAKE_CASE : int = f.readlines() # Now let's find the class / func in the code! _SCREAMING_SNAKE_CASE : Union[str, Any] = "" _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(__lowerCamelCase ): raise ValueError(f""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). _SCREAMING_SNAKE_CASE : Optional[int] = line_index while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 _SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index] return "".join(__lowerCamelCase ) UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)') UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)') UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>') def lowerCamelCase__ (__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(__lowerCamelCase ): return re.search(R"^(\s*)\S", lines[idx] ).groups()[0] return "" def lowerCamelCase__ (__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0 if has_indent: _SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}""" _SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = black.format_str(__lowerCamelCase, mode=__lowerCamelCase ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(__lowerCamelCase ) return result[len("class Bla:\n" ) :] if has_indent else result def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ): with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f: _SCREAMING_SNAKE_CASE : int = f.readlines() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Tuple = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups() _SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2 _SCREAMING_SNAKE_CASE : int = theoretical_indent _SCREAMING_SNAKE_CASE : str = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. _SCREAMING_SNAKE_CASE : Any = True while line_index < len(__lowerCamelCase ) and should_continue: line_index += 1 if line_index >= len(__lowerCamelCase ): break _SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index] _SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 _SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index] _SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase ) # Remove any nested `Copied from` comments to avoid circular copies _SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None] _SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase ) # Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0: _SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns] for pattern in patterns: if pattern is None: continue _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups() _SCREAMING_SNAKE_CASE : Tuple = re.sub(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) if option.strip() == "all-casing": _SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), obja.lower(), __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), obja.upper(), __lowerCamelCase ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line _SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code ) _SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: _SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] _SCREAMING_SNAKE_CASE : int = start_index + 1 if overwrite and len(__lowerCamelCase ) > 0: # Warn the user a file has been modified. print(f"""Detected changes, rewriting {filename}.""" ) with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f: f.writelines(__lowerCamelCase ) return diffs def lowerCamelCase__ (__lowerCamelCase = False ): _SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(__lowerCamelCase, "**/*.py" ), recursive=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = [] for filename in all_files: _SCREAMING_SNAKE_CASE : int = is_copy_consistent(__lowerCamelCase, __lowerCamelCase ) diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(__lowerCamelCase ) > 0: _SCREAMING_SNAKE_CASE : Dict = "\n".join(__lowerCamelCase ) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": UpperCamelCase__ =argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase__ =parser.parse_args() check_copies(args.fix_and_overwrite)
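# How the check works: a `# Copied from diffusers.<path> [with A->B ...]`
# header marks a block that must stay identical to its source, up to the
# optional renames. `is_copy_consistent` re-derives that "theoretical"
# block, runs it through black so formatting differences do not count as
# drift, and either reports the mismatch or (with --fix_and_overwrite)
# rewrites the block in place.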
325
0
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCAmelCase__: '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=[1, 1, 2] , __lowerCamelCase=1 , __lowerCamelCase=3_2 , __lowerCamelCase=4 , __lowerCamelCase=8 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu_new" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=5_1_2 , __lowerCamelCase=3 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , __lowerCamelCase=False , ) -> List[Any]: _SCREAMING_SNAKE_CASE : int = parent _SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size _SCREAMING_SNAKE_CASE : Optional[Any] = seq_length _SCREAMING_SNAKE_CASE : Any = is_training _SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask _SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids _SCREAMING_SNAKE_CASE : List[str] = use_labels _SCREAMING_SNAKE_CASE : str = vocab_size _SCREAMING_SNAKE_CASE : Tuple = block_sizes _SCREAMING_SNAKE_CASE : List[str] = num_decoder_layers _SCREAMING_SNAKE_CASE : Tuple = d_model _SCREAMING_SNAKE_CASE : str = n_head _SCREAMING_SNAKE_CASE : str = d_head _SCREAMING_SNAKE_CASE : Optional[int] = d_inner _SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act _SCREAMING_SNAKE_CASE : Any = hidden_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout _SCREAMING_SNAKE_CASE : Optional[Any] = activation_dropout _SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings _SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size _SCREAMING_SNAKE_CASE : List[Any] = 2 _SCREAMING_SNAKE_CASE : Tuple = num_labels _SCREAMING_SNAKE_CASE : int = num_choices _SCREAMING_SNAKE_CASE : Union[str, Any] = scope _SCREAMING_SNAKE_CASE : List[str] = initializer_std # Used in the tests to check the size of the first attention layer _SCREAMING_SNAKE_CASE : Dict = n_head # Used in the tests to check the size of the first hidden state _SCREAMING_SNAKE_CASE : Union[str, Any] = self.d_model # Used in the tests to check the number of output hidden states/attentions _SCREAMING_SNAKE_CASE : Union[str, Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: _SCREAMING_SNAKE_CASE : Any = self.num_hidden_layers + 2 def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE : List[str] = None if self.use_input_mask: _SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE : str = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE : Union[str, Any] = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_labels: _SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE : int = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[str]: _SCREAMING_SNAKE_CASE : Tuple = TFFunnelModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : Optional[int] = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = [input_ids, input_mask] _SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) _SCREAMING_SNAKE_CASE : str = False _SCREAMING_SNAKE_CASE : Dict = TFFunnelModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Any = TFFunnelModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelBaseModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = [input_ids, input_mask] _SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Dict = TFFunnelBaseModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) _SCREAMING_SNAKE_CASE : Any = False _SCREAMING_SNAKE_CASE : int = TFFunnelBaseModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Any: _SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelForPreTraining(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> str: _SCREAMING_SNAKE_CASE : Dict = TFFunnelForMaskedLM(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Any = self.num_labels _SCREAMING_SNAKE_CASE : Any = TFFunnelForSequenceClassification(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Dict: _SCREAMING_SNAKE_CASE : List[str] = self.num_choices _SCREAMING_SNAKE_CASE : str = TFFunnelForMultipleChoice(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : Any = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : int = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : List[Any] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> str: _SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels _SCREAMING_SNAKE_CASE : Any = TFFunnelForTokenClassification(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any 
= {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = TFFunnelForQuestionAnswering(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs() ( _SCREAMING_SNAKE_CASE ) : Dict = config_and_inputs _SCREAMING_SNAKE_CASE : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __snake_case = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __snake_case = False __snake_case = False def UpperCamelCase_ ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : List[str] = TFFunnelModelTester(self ) _SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Dict: self.config_tester.run_common_tests() def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) @require_tf class lowerCAmelCase__( __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __snake_case = False __snake_case = False def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = TFFunnelModelTester(self , base=__lowerCamelCase ) 
_SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
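# Note: the bare `False` assignments before the second and third model
# builds above originally toggled Funnel config flags (the dump drops the
# attribute names; in the upstream test they are `separate_cls` and
# `truncate_seq`). Pooling the length-7 input through block sizes [1, 1, 2]
# is what shrinks the base model's output length to 2 or 3 depending on
# those flags.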
358
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class lowerCAmelCase__: '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any: _SCREAMING_SNAKE_CASE : str = parent _SCREAMING_SNAKE_CASE : List[Any] = 1_3 _SCREAMING_SNAKE_CASE : List[str] = 7 _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : List[str] = True _SCREAMING_SNAKE_CASE : int = True _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : int = 9_9 _SCREAMING_SNAKE_CASE : str = 3_8_4 _SCREAMING_SNAKE_CASE : List[Any] = 2 _SCREAMING_SNAKE_CASE : Dict = 4 _SCREAMING_SNAKE_CASE : Dict = 3_7 _SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu" _SCREAMING_SNAKE_CASE : str = 0.1 _SCREAMING_SNAKE_CASE : str = 0.1 _SCREAMING_SNAKE_CASE : List[Any] = 5_1_2 _SCREAMING_SNAKE_CASE : Tuple = 1_6 _SCREAMING_SNAKE_CASE : Dict = 2 _SCREAMING_SNAKE_CASE : Any = 0.02 _SCREAMING_SNAKE_CASE : Any = 3 _SCREAMING_SNAKE_CASE : List[str] = 4 _SCREAMING_SNAKE_CASE : List[Any] = 1_2_8 _SCREAMING_SNAKE_CASE : Optional[int] = 2 _SCREAMING_SNAKE_CASE : int = 9 _SCREAMING_SNAKE_CASE : List[str] = 1 _SCREAMING_SNAKE_CASE : List[Any] = None def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE : List[str] = None if self.use_input_mask: _SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE : Dict = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Union[str, Any] = None _SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_labels: _SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: _SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _SCREAMING_SNAKE_CASE : str = [input_ids, input_mask] _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : int = self.num_labels _SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: _SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices _SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) ) _SCREAMING_SNAKE_CASE : List[Any] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = self.num_labels _SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, 
self.num_labels) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int: _SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) : List[Any] = config_and_inputs _SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) __snake_case = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) __snake_case = False __snake_case = False __snake_case = False def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self ) _SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 ) def UpperCamelCase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase ) @slow def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Union[str, Any] = True 
_SCREAMING_SNAKE_CASE : Any = True if hasattr(__lowerCamelCase , "use_cache" ): _SCREAMING_SNAKE_CASE : List[str] = True _SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) _SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase ) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" ) _SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase ) if self.is_encoder_decoder: _SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"] _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"] else: _SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"] _SCREAMING_SNAKE_CASE : Dict = outputs["attentions"] self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) _SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) _SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase ) def check_decoder_attentions_output(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase ) self.assertEqual(out_len % 2 , 0 ) _SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for 
model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : Any = False _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) check_encoder_attentions_output(__lowerCamelCase ) if self.is_encoder_decoder: _SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) check_decoder_attentions_output(__lowerCamelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _SCREAMING_SNAKE_CASE : Dict = True _SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(config.output_hidden_states , __lowerCamelCase ) check_encoder_attentions_output(__lowerCamelCase ) # Check attention is always last and order is fine _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) ) self.assertEqual(model.config.output_hidden_states , __lowerCamelCase ) check_encoder_attentions_output(__lowerCamelCase ) @require_tf class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) _SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0] _SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8] self.assertEqual(output.shape , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
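# The `/ 2` in the attention-shape checks above is intentional: ConvBERT's
# mixed attention splits the heads between classic self-attention and the
# span-based dynamic convolution branch (head_ratio defaults to 2), so only
# half of `num_attention_heads` appear in the attention tensors.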
325
0
from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def lowerCamelCase__ (): _SCREAMING_SNAKE_CASE : List[str] = [randint(-1000, 1000 ) for i in range(10 )] _SCREAMING_SNAKE_CASE : Union[str, Any] = randint(-5000, 5000 ) return (arr, r) UpperCamelCase__ =make_dataset() def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ): for triplet in permutations(__lowerCamelCase, 3 ): if sum(__lowerCamelCase ) == target: return tuple(sorted(__lowerCamelCase ) ) return (0, 0, 0) def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ): arr.sort() _SCREAMING_SNAKE_CASE : Optional[Any] = len(__lowerCamelCase ) for i in range(n - 1 ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def lowerCamelCase__ (): _SCREAMING_SNAKE_CASE : List[Any] = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n" _SCREAMING_SNAKE_CASE : Dict = "\ntriplet_sum1(*dataset)\n" _SCREAMING_SNAKE_CASE : Any = "\ntriplet_sum2(*dataset)\n" _SCREAMING_SNAKE_CASE : List[str] = repeat(setup=__lowerCamelCase, stmt=__lowerCamelCase, repeat=5, number=10000 ) _SCREAMING_SNAKE_CASE : Tuple = repeat(setup=__lowerCamelCase, stmt=__lowerCamelCase, repeat=5, number=10000 ) return (min(__lowerCamelCase ), min(__lowerCamelCase )) if __name__ == "__main__": from doctest import testmod testmod() UpperCamelCase__ =solution_times() print(f"The time for naive implementation is {times[0]}.") print(f"The time for optimized implementation is {times[1]}.")
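# Why the second version wins: permutations() enumerates O(n^3) ordered
# triplets, while the sorted two-pointer scan fixes arr[i] and moves the
# left/right indices inward, giving O(n^2) after an O(n log n) sort. Both
# return (0, 0, 0) when no triplet hits the target, which is why the
# benchmark times them on the same random dataset.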
359
from timeit import timeit def lowerCamelCase__ (__lowerCamelCase ): if number < 0: raise ValueError("the value of input must not be negative" ) _SCREAMING_SNAKE_CASE : str = 0 while number: number &= number - 1 result += 1 return result def lowerCamelCase__ (__lowerCamelCase ): if number < 0: raise ValueError("the value of input must not be negative" ) _SCREAMING_SNAKE_CASE : str = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def lowerCamelCase__ (): def do_benchmark(__lowerCamelCase ) -> None: _SCREAMING_SNAKE_CASE : Tuple = "import __main__ as z" print(f"""Benchmark when {number = }:""" ) print(f"""{get_set_bits_count_using_modulo_operator(__lowerCamelCase ) = }""" ) _SCREAMING_SNAKE_CASE : str = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=__lowerCamelCase ) print(f"""timeit() runs in {timing} seconds""" ) print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(__lowerCamelCase ) = }""" ) _SCREAMING_SNAKE_CASE : int = timeit( "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=__lowerCamelCase, ) print(f"""timeit() runs in {timing} seconds""" ) for number in (25, 37, 58, 0): do_benchmark(__lowerCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
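# Example: 25 = 0b11001 has three set bits, so both counters return 3.
# Kernighan's trick works because `number & (number - 1)` clears exactly
# the lowest set bit, so that loop runs popcount(number) times instead of
# once per bit position.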
325
0
def lowerCamelCase__ (__lowerCamelCase ): if not isinstance(__lowerCamelCase, __lowerCamelCase ): raise ValueError("check_bouncy() accepts only integer arguments" ) _SCREAMING_SNAKE_CASE : List[Any] = str(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = "".join(sorted(__lowerCamelCase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def lowerCamelCase__ (__lowerCamelCase = 99 ): if not 0 < percent < 100: raise ValueError("solution() only accepts values from 0 to 100" ) _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[int] = 1 while True: if check_bouncy(__lowerCamelCase ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f"{solution(99)}")
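# A number is "bouncy" when its digits are neither monotonically increasing
# nor decreasing (155349 is the classic example). check_bouncy() tests this
# by comparing the digit string against its sorted and reverse-sorted
# forms; solution(99) then walks upward until at least 99% of the integers
# up to n are bouncy (Project Euler 112).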
360
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase__ ={ 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ =[ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
325
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = 'naver-clova-ix/donut-base-finetuned-docvqa' __snake_case = ( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) __snake_case = 'document_qa' __snake_case = AutoProcessor __snake_case = VisionEncoderDecoderModel __snake_case = ['image', 'text'] __snake_case = ['text'] def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]: if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." ) super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" _SCREAMING_SNAKE_CASE : Any = task_prompt.replace("{user_input}" , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = self.pre_processor.tokenizer( __lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors="pt" ).input_ids _SCREAMING_SNAKE_CASE : List[Any] = self.pre_processor(__lowerCamelCase , return_tensors="pt" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def UpperCamelCase_ ( self , __lowerCamelCase ) -> Tuple: return self.model.generate( inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowerCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowerCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowerCamelCase , ).sequences def UpperCamelCase_ ( self , __lowerCamelCase ) -> Tuple: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.pre_processor.batch_decode(__lowerCamelCase )[0] _SCREAMING_SNAKE_CASE : Tuple = sequence.replace(self.pre_processor.tokenizer.eos_token , "" ) _SCREAMING_SNAKE_CASE : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "" ) _SCREAMING_SNAKE_CASE : Any = re.sub(r"<.*?>" , "" , __lowerCamelCase , count=1 ).strip() # remove first task start token _SCREAMING_SNAKE_CASE : Optional[int] = self.pre_processor.tokenajson(__lowerCamelCase ) return sequence["answer"]
361
import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () UpperCamelCase__ =np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). UpperCamelCase__ =[0, 25, 50] UpperCamelCase__ =[25, 50, 75] UpperCamelCase__ =fuzz.membership.trimf(X, abca) UpperCamelCase__ =fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. UpperCamelCase__ =np.ones(75) UpperCamelCase__ =np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) UpperCamelCase__ =fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) UpperCamelCase__ =fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] UpperCamelCase__ =young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) UpperCamelCase__ =young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] UpperCamelCase__ =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] UpperCamelCase__ =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
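# fuzz.membership.trimf(X, [a, b, c]) builds a triangular membership
# function rising from 0 at a to 1 at b and back to 0 at c, so `young` and
# `middle_aged` are overlapping triangles on the 0-75 age axis that the
# set operations below combine pointwise.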
325
0
UpperCamelCase__ ={ 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): _SCREAMING_SNAKE_CASE : Dict = set() # keep track of all the paths to be checked _SCREAMING_SNAKE_CASE : Union[str, Any] = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _SCREAMING_SNAKE_CASE : List[Any] = queue.pop(0 ) # get the last node from the path _SCREAMING_SNAKE_CASE : Union[str, Any] = path[-1] if node not in explored: _SCREAMING_SNAKE_CASE : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _SCREAMING_SNAKE_CASE : Optional[int] = list(__lowerCamelCase ) new_path.append(__lowerCamelCase ) queue.append(__lowerCamelCase ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(__lowerCamelCase ) # in case there's no path between the 2 nodes return [] def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _SCREAMING_SNAKE_CASE : Any = [start] _SCREAMING_SNAKE_CASE : Dict = set(__lowerCamelCase ) # Keep tab on distances from `start` node. _SCREAMING_SNAKE_CASE : Tuple = {start: 0, target: -1} while queue: _SCREAMING_SNAKE_CASE : List[Any] = queue.pop(0 ) if node == target: _SCREAMING_SNAKE_CASE : Dict = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(__lowerCamelCase ) queue.append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
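# Correctness hinges on BFS order: paths leave the queue in non-decreasing
# length, so the first path reaching `goal` has the fewest edges. On
# demo_graph, G -> C -> A -> B -> D is found this way, matching the 4-edge
# distance returned by bfs_shortest_path_distance.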
362
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = ['vqvae'] def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]: super().__init__() self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase ) def UpperCamelCase_ ( self ) -> int: return 5_0 if isinstance(self.scheduler , __lowerCamelCase ) else 1_0_0_0 @torch.no_grad() def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: _SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps() self.scheduler.set_timesteps(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: _SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: _SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowerCamelCase , device=self.device , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = noise _SCREAMING_SNAKE_CASE : Optional[int] = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape( (input_image.height, input_image.width) ) _SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1 _SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample( generator=__lowerCamelCase )[0] _SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images if start_step > 0: _SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] ) _SCREAMING_SNAKE_CASE : int = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) _SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second ) _SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second ) _SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , 
__lowerCamelCase ): _SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"] else: _SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"] if isinstance(self.scheduler , __lowerCamelCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step( model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"] else: _SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step( model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"] if mask is not None: if mask_start > 0: _SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start] if mask_end > 0: _SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance _SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images _SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"] _SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 ) _SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() _SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" ) _SCREAMING_SNAKE_CASE : Tuple = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowerCamelCase , mode="RGB" ).convert("L" ) for _ in images) ) _SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) ) @torch.no_grad() def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray: assert isinstance(self.scheduler , __lowerCamelCase ) self.scheduler.set_timesteps(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = np.array( [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1 _SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): _SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps _SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t] _SCREAMING_SNAKE_CASE : List[str] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) _SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t _SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"] _SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output _SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) _SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor: _SCREAMING_SNAKE_CASE : Any = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) ) return sin((1 - 
alpha) * theta ) * xa / sin(__lowerCamelCase ) + sin(alpha * theta ) * xa / sin(__lowerCamelCase )
325
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Editorial note: the flattened source had collapsed the conditional
# `_import_structure[...]` assignments into bare names; the submodule keys
# below are restored following the standard `transformers` lazy-import layout.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
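# Editorial note: with the lazy structure restored above, client code imports
# Funnel classes as usual, e.g.
#
#     from transformers import FunnelConfig, FunnelTokenizer
#
# and _LazyModule only materialises each submodule on first attribute access.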
363
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
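# Editorial worked check (assuming the functions above): perimeter 120 admits
# exactly three integer right triangles -- (30, 40, 50), (20, 48, 52) and
# (24, 45, 51) -- so pythagorean_triple(200)[120] == 3.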
325
0
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ =logging.get_logger(__name__) # General docstring UpperCamelCase__ ='RegNetConfig' # Base docstring UpperCamelCase__ ='facebook/regnet-y-040' UpperCamelCase__ =[1, 1088, 7, 7] # Image classification docstring UpperCamelCase__ ='facebook/regnet-y-040' UpperCamelCase__ ='tabby, tabby cat' UpperCamelCase__ =[ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 3 , __lowerCamelCase = 1 , __lowerCamelCase = 1 , __lowerCamelCase = "relu" , ) -> List[str]: super().__init__() _SCREAMING_SNAKE_CASE : Dict = nn.Convad( __lowerCamelCase , __lowerCamelCase , kernel_size=__lowerCamelCase , stride=__lowerCamelCase , padding=kernel_size // 2 , groups=__lowerCamelCase , bias=__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : Any = nn.BatchNormad(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]: _SCREAMING_SNAKE_CASE : List[Any] = self.convolution(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = self.normalization(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = self.activation(__lowerCamelCase ) return hidden_state class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase ) -> List[Any]: super().__init__() _SCREAMING_SNAKE_CASE : int = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) _SCREAMING_SNAKE_CASE : int = config.num_channels def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE : Optional[Any] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) _SCREAMING_SNAKE_CASE : List[Any] = self.embedder(__lowerCamelCase ) return hidden_state class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 2 ) -> Dict: super().__init__() _SCREAMING_SNAKE_CASE : Dict = nn.Convad(__lowerCamelCase , __lowerCamelCase , kernel_size=1 , stride=__lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = nn.BatchNormad(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> Tensor: _SCREAMING_SNAKE_CASE : str = self.convolution(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.normalization(__lowerCamelCase ) return hidden_state class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Dict: super().__init__() _SCREAMING_SNAKE_CASE : str = nn.AdaptiveAvgPoolad((1, 1) ) _SCREAMING_SNAKE_CASE : Tuple = nn.Sequential( nn.Convad(__lowerCamelCase , __lowerCamelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(__lowerCamelCase , __lowerCamelCase , kernel_size=1 ) , nn.Sigmoid() , ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> str: # b c h w -> b c 1 1 _SCREAMING_SNAKE_CASE : str = self.pooler(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = self.attention(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = hidden_state * attention return hidden_state class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 1 ) -> Any: super().__init__() _SCREAMING_SNAKE_CASE : Union[str, Any] = in_channels != out_channels or stride != 1 _SCREAMING_SNAKE_CASE : str = max(1 , out_channels // config.groups_width ) _SCREAMING_SNAKE_CASE : Dict = ( RegNetShortCut(__lowerCamelCase , __lowerCamelCase , stride=__lowerCamelCase ) if should_apply_shortcut else nn.Identity() ) _SCREAMING_SNAKE_CASE : Tuple = nn.Sequential( RegNetConvLayer(__lowerCamelCase , __lowerCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__lowerCamelCase , __lowerCamelCase , stride=__lowerCamelCase , groups=__lowerCamelCase , activation=config.hidden_act ) , RegNetConvLayer(__lowerCamelCase , __lowerCamelCase , kernel_size=1 , activation=__lowerCamelCase ) , ) _SCREAMING_SNAKE_CASE : Dict = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self , __lowerCamelCase ) -> str: _SCREAMING_SNAKE_CASE : Dict = hidden_state _SCREAMING_SNAKE_CASE : Tuple = self.layer(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.shortcut(__lowerCamelCase ) hidden_state += residual _SCREAMING_SNAKE_CASE : int = self.activation(__lowerCamelCase ) return hidden_state class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 1 ) -> Optional[int]: super().__init__() _SCREAMING_SNAKE_CASE : Any = in_channels != out_channels or stride != 1 _SCREAMING_SNAKE_CASE : Optional[Any] = max(1 , out_channels // config.groups_width ) _SCREAMING_SNAKE_CASE : int = ( RegNetShortCut(__lowerCamelCase , __lowerCamelCase , stride=__lowerCamelCase ) if should_apply_shortcut else nn.Identity() ) _SCREAMING_SNAKE_CASE : str = nn.Sequential( RegNetConvLayer(__lowerCamelCase , __lowerCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__lowerCamelCase , __lowerCamelCase , stride=__lowerCamelCase , groups=__lowerCamelCase , activation=config.hidden_act ) , RegNetSELayer(__lowerCamelCase , 
reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__lowerCamelCase , __lowerCamelCase , kernel_size=1 , activation=__lowerCamelCase ) , ) _SCREAMING_SNAKE_CASE : str = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]: _SCREAMING_SNAKE_CASE : Any = hidden_state _SCREAMING_SNAKE_CASE : Optional[Any] = self.layer(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = self.shortcut(__lowerCamelCase ) hidden_state += residual _SCREAMING_SNAKE_CASE : Union[str, Any] = self.activation(__lowerCamelCase ) return hidden_state class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 2 , __lowerCamelCase = 2 , ) -> Tuple: super().__init__() _SCREAMING_SNAKE_CASE : str = RegNetXLayer if config.layer_type == "x" else RegNetYLayer _SCREAMING_SNAKE_CASE : str = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , stride=__lowerCamelCase , ) , *[layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for _ in range(depth - 1 )] , ) def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.layers(__lowerCamelCase ) return hidden_state class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase ) -> Tuple: super().__init__() _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( __lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _SCREAMING_SNAKE_CASE : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(__lowerCamelCase , config.depths[1:] ): self.stages.append(RegNetStage(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , depth=__lowerCamelCase ) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = False , __lowerCamelCase = True ) -> BaseModelOutputWithNoAttention: _SCREAMING_SNAKE_CASE : str = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _SCREAMING_SNAKE_CASE : Tuple = hidden_states + (hidden_state,) _SCREAMING_SNAKE_CASE : List[Any] = stage_module(__lowerCamelCase ) if output_hidden_states: _SCREAMING_SNAKE_CASE : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase , hidden_states=__lowerCamelCase ) class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = RegNetConfig __snake_case = 'regnet' __snake_case = 'pixel_values' __snake_case = True def UpperCamelCase_ ( self , __lowerCamelCase ) -> Tuple: if isinstance(__lowerCamelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=False ) -> Dict: if isinstance(__lowerCamelCase , __lowerCamelCase ): _SCREAMING_SNAKE_CASE : Union[str, Any] = value UpperCamelCase__ =R'\n This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' UpperCamelCase__ =R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , __lowercase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class lowerCAmelCase__( __lowercase ): '''simple docstring''' def __init__( self , __lowerCamelCase ) -> Dict: super().__init__(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = config _SCREAMING_SNAKE_CASE : Union[str, Any] = RegNetEmbeddings(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = RegNetEncoder(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None ) -> BaseModelOutputWithPoolingAndNoAttention: _SCREAMING_SNAKE_CASE : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _SCREAMING_SNAKE_CASE : Tuple = return_dict if return_dict is not None else self.config.use_return_dict _SCREAMING_SNAKE_CASE : Tuple = self.embedder(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.encoder( __lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_outputs[0] _SCREAMING_SNAKE_CASE : Optional[Any] = self.pooler(__lowerCamelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase , pooler_output=__lowerCamelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , __lowercase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class lowerCAmelCase__( __lowercase ): '''simple docstring''' def __init__( self , __lowerCamelCase ) -> Union[str, Any]: super().__init__(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = config.num_labels _SCREAMING_SNAKE_CASE : Any = RegNetModel(__lowerCamelCase ) # classification head _SCREAMING_SNAKE_CASE : Any = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , ) -> ImageClassifierOutputWithNoAttention: _SCREAMING_SNAKE_CASE : Any = return_dict if return_dict is not None else self.config.use_return_dict _SCREAMING_SNAKE_CASE : Tuple = self.regnet(__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = outputs.pooler_output if return_dict else outputs[1] _SCREAMING_SNAKE_CASE : Tuple = self.classifier(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _SCREAMING_SNAKE_CASE : List[Any] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _SCREAMING_SNAKE_CASE : Dict = "single_label_classification" else: _SCREAMING_SNAKE_CASE : List[Any] = "multi_label_classification" if self.config.problem_type == "regression": _SCREAMING_SNAKE_CASE : Union[str, Any] = MSELoss() if self.num_labels == 1: _SCREAMING_SNAKE_CASE : Tuple = loss_fct(logits.squeeze() , labels.squeeze() ) else: _SCREAMING_SNAKE_CASE : Optional[int] = loss_fct(__lowerCamelCase , __lowerCamelCase ) elif self.config.problem_type == "single_label_classification": _SCREAMING_SNAKE_CASE : List[Any] = CrossEntropyLoss() _SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _SCREAMING_SNAKE_CASE : Tuple = BCEWithLogitsLoss() _SCREAMING_SNAKE_CASE : Any = loss_fct(__lowerCamelCase , __lowerCamelCase ) if not return_dict: _SCREAMING_SNAKE_CASE : Optional[int] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states )
364
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
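# Editorial note: @patch decorators inject mocks bottom-up, so the
# builtins.open mock arrives as the first parameter (file) and the
# socket.socket mock as the second (sock); the test body relies on that
# ordering.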
325
0
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
365
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = ['image_processor', 'tokenizer'] __snake_case = 'BlipImageProcessor' __snake_case = 'AutoTokenizer' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__(__lowerCamelCase , __lowerCamelCase ) # add QFormer tokenizer _SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) _SCREAMING_SNAKE_CASE : Any = BatchFeature() if text is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) encoding.update(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" ) _SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" ) if images is not None: _SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase ) encoding.update(__lowerCamelCase ) return encoding def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any: if os.path.isfile(__lowerCamelCase ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__lowerCamelCase ) return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase ) @classmethod def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" ) _SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase ) args.append(__lowerCamelCase ) return cls(*__lowerCamelCase )
325
0
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCamelCase__ =datasets.logging.get_logger(__name__) UpperCamelCase__ ='\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' UpperCamelCase__ ='\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' UpperCamelCase__ ='\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False, __lowerCamelCase=False, __lowerCamelCase=True, __lowerCamelCase=False, __lowerCamelCase="dummy_doc" ): _SCREAMING_SNAKE_CASE : Optional[int] = {doc: key_lines} _SCREAMING_SNAKE_CASE : List[str] = {doc: sys_lines} _SCREAMING_SNAKE_CASE : List[Any] = {} _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Tuple = 0 _SCREAMING_SNAKE_CASE : Tuple = 0 _SCREAMING_SNAKE_CASE : Dict = 0 _SCREAMING_SNAKE_CASE : int = 0 _SCREAMING_SNAKE_CASE : int = 0 _SCREAMING_SNAKE_CASE : Optional[int] = reader.get_doc_mentions(__lowerCamelCase, key_doc_lines[doc], __lowerCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _SCREAMING_SNAKE_CASE : List[Any] = reader.set_annotated_parse_trees(__lowerCamelCase, key_doc_lines[doc], __lowerCamelCase, __lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = reader.get_doc_mentions(__lowerCamelCase, sys_doc_lines[doc], __lowerCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _SCREAMING_SNAKE_CASE : Tuple = reader.set_annotated_parse_trees(__lowerCamelCase, key_doc_lines[doc], __lowerCamelCase, __lowerCamelCase ) if remove_nested: _SCREAMING_SNAKE_CASE : List[Any] = reader.remove_nested_coref_mentions(__lowerCamelCase, __lowerCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _SCREAMING_SNAKE_CASE : Any = reader.remove_nested_coref_mentions(__lowerCamelCase, __lowerCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _SCREAMING_SNAKE_CASE : Optional[Any] = reader.get_mention_assignments(__lowerCamelCase, __lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = reader.get_mention_assignments(__lowerCamelCase, __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( "Number of removed nested coreferring mentions in the key " f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( "Number of resulting singleton clusters in the key " f"""annotation: 
{key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ "files, respectively" ) return doc_coref_infos def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): _SCREAMING_SNAKE_CASE : Dict = get_coref_infos(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = {} _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : Tuple = 0 for name, metric in metrics: _SCREAMING_SNAKE_CASE : str = evaluator.evaluate_documents(__lowerCamelCase, __lowerCamelCase, beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} ) logger.info( name.ljust(10 ), f"""Recall: {recall * 100:.2f}""", f""" Precision: {precision * 100:.2f}""", f""" F1: {fa * 100:.2f}""", ) if conll_subparts_num == 3: _SCREAMING_SNAKE_CASE : Optional[Any] = (conll / 3) * 100 logger.info(f"""CoNLL score: {conll:.2f}""" ) output_scores.update({"conll_score": conll} ) return output_scores def lowerCamelCase__ (__lowerCamelCase ): _SCREAMING_SNAKE_CASE : str = False for line in key_lines: if not line.startswith("#" ): if len(line.split() ) > 6: _SCREAMING_SNAKE_CASE : Any = line.split()[5] if not parse_col == "-": _SCREAMING_SNAKE_CASE : Optional[Any] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Sequence(datasets.Value("string" ) ), } ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[ "https://github.com/ns-moosavi/coval", "https://www.aclweb.org/anthology/P16-1060", "http://www.conll.cemantix.org/2012/data.html", ] , ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : List[str] = [ ("mentions", evaluator.mentions), ("muc", evaluator.muc), ("bcub", evaluator.b_cubed), ("ceafe", evaluator.ceafe), ("lea", evaluator.lea), ] if min_span: _SCREAMING_SNAKE_CASE : Dict = util.check_gold_parse_annotation(__lowerCamelCase ) if not has_gold_parse: raise NotImplementedError("References should have gold parse annotation to use 'min_span'." ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _SCREAMING_SNAKE_CASE : List[str] = evaluate( key_lines=__lowerCamelCase , sys_lines=__lowerCamelCase , metrics=__lowerCamelCase , NP_only=__lowerCamelCase , remove_nested=__lowerCamelCase , keep_singletons=__lowerCamelCase , min_span=__lowerCamelCase , ) return score
366
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
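# Editorial examples, consistent with maths.prime_check.is_prime:
#   twin_prime(3)  == 5    # 3 and 5 are both prime
#   twin_prime(4)  == -1   # 4 is not prime
#   twin_prime(13) == -1   # 15 = 13 + 2 is composite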
325
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
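# Editorial usage sketch (defaults mirror the signature above; the model class
# name is the standard transformers pairing, assumed here):
#
#     from transformers import DecisionTransformerConfig, DecisionTransformerModel
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#     model = DecisionTransformerModel(config)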
367
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
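# Editorial usage sketch for the subcommand registered above (the model id is
# illustrative):
#
#     transformers-cli download --cache-dir /tmp/models bert-base-uncased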
325
0
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=4 , ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = parent _SCREAMING_SNAKE_CASE : Optional[Any] = batch_size _SCREAMING_SNAKE_CASE : Any = seq_length _SCREAMING_SNAKE_CASE : Optional[Any] = is_training _SCREAMING_SNAKE_CASE : Any = use_attention_mask _SCREAMING_SNAKE_CASE : Any = use_token_type_ids _SCREAMING_SNAKE_CASE : str = use_labels _SCREAMING_SNAKE_CASE : Optional[int] = vocab_size _SCREAMING_SNAKE_CASE : List[str] = hidden_size _SCREAMING_SNAKE_CASE : str = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = num_attention_heads _SCREAMING_SNAKE_CASE : Any = intermediate_size _SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act _SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Dict = max_position_embeddings _SCREAMING_SNAKE_CASE : List[str] = type_vocab_size _SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size _SCREAMING_SNAKE_CASE : Dict = initializer_range _SCREAMING_SNAKE_CASE : str = num_choices def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE : Dict = None if self.use_attention_mask: _SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE : int = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE : List[Any] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCamelCase_ ( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Tuple = config_and_inputs _SCREAMING_SNAKE_CASE : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class lowerCAmelCase__( 
__lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = True __snake_case = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : Any = FlaxRoFormerModelTester(self ) @slow def UpperCamelCase_ ( self ) -> Dict: for model_class_name in self.all_model_classes: _SCREAMING_SNAKE_CASE : Dict = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase ) @require_flax class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : str = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _SCREAMING_SNAKE_CASE : List[Any] = jnp.array([[0, 1, 2, 3, 4, 5]] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__lowerCamelCase )[0] _SCREAMING_SNAKE_CASE : List[str] = 5_0_0_0_0 _SCREAMING_SNAKE_CASE : Any = (1, 6, vocab_size) self.assertEqual(output.shape , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
368
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowerCAmelCase__: '''simple docstring''' __snake_case = BlenderbotSmallConfig __snake_case = {} __snake_case = 'gelu' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]: _SCREAMING_SNAKE_CASE : int = parent _SCREAMING_SNAKE_CASE : Tuple = batch_size _SCREAMING_SNAKE_CASE : Dict = seq_length _SCREAMING_SNAKE_CASE : List[str] = is_training _SCREAMING_SNAKE_CASE : List[str] = use_labels _SCREAMING_SNAKE_CASE : Dict = vocab_size _SCREAMING_SNAKE_CASE : Dict = hidden_size _SCREAMING_SNAKE_CASE : int = num_hidden_layers _SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads _SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id _SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id _SCREAMING_SNAKE_CASE : List[str] = bos_token_id def UpperCamelCase_ ( self ) -> List[str]: _SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 ) _SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE : str = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, inputs_dict def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple: _SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder() _SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"] _SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :] _SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :] 
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"] _SCREAMING_SNAKE_CASE : int = 1 # first forward pass _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) _SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] _SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx] _SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 ) def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ): if attention_mask is None: _SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: _SCREAMING_SNAKE_CASE : List[str] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: _SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __snake_case = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __snake_case = True __snake_case = False __snake_case = False def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self ) _SCREAMING_SNAKE_CASE : 
Dict = ConfigTester(self , config_class=__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Tuple: self.config_tester.run_common_tests() def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase ) @require_tokenizers @require_tf class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' __snake_case = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' ] __snake_case = 'facebook/blenderbot_small-90M' @cached_property def UpperCamelCase_ ( self ) -> List[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" ) _SCREAMING_SNAKE_CASE : Dict = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , ) _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
325
0
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowerCAmelCase__( __lowercase ): '''simple docstring''' def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCamelCase , "tf_padding" ) ) self.parent.assertTrue(hasattr(__lowerCamelCase , "depth_multiplier" ) ) class lowerCAmelCase__: '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3 , __lowerCamelCase=3_2 , __lowerCamelCase=0.25 , __lowerCamelCase=8 , __lowerCamelCase=True , __lowerCamelCase=1_0_2_4 , __lowerCamelCase=3_2 , __lowerCamelCase="relu6" , __lowerCamelCase=0.1 , __lowerCamelCase=0.02 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=None , ) -> int: _SCREAMING_SNAKE_CASE : Any = parent _SCREAMING_SNAKE_CASE : Optional[int] = batch_size _SCREAMING_SNAKE_CASE : int = num_channels _SCREAMING_SNAKE_CASE : Any = image_size _SCREAMING_SNAKE_CASE : Optional[Any] = depth_multiplier _SCREAMING_SNAKE_CASE : Optional[Any] = min_depth _SCREAMING_SNAKE_CASE : Union[str, Any] = tf_padding _SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier ) _SCREAMING_SNAKE_CASE : Dict = output_stride _SCREAMING_SNAKE_CASE : Any = hidden_act _SCREAMING_SNAKE_CASE : Any = classifier_dropout_prob _SCREAMING_SNAKE_CASE : int = use_labels _SCREAMING_SNAKE_CASE : Optional[Any] = is_training _SCREAMING_SNAKE_CASE : Dict = num_labels _SCREAMING_SNAKE_CASE : Optional[int] = initializer_range _SCREAMING_SNAKE_CASE : Optional[int] = scope def UpperCamelCase_ ( self ) -> Dict: _SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_labels: _SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) _SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self ) -> List[str]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _SCREAMING_SNAKE_CASE : Dict = 
model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels _SCREAMING_SNAKE_CASE : int = MobileNetVaForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : List[str] = config_and_inputs _SCREAMING_SNAKE_CASE : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () __snake_case = ( {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification} if is_torch_available() else {} ) __snake_case = False __snake_case = False __snake_case = False __snake_case = False def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : List[Any] = MobileNetVaModelTester(self ) _SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def UpperCamelCase_ ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" ) def UpperCamelCase_ ( self ) -> Optional[int]: pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" ) def UpperCamelCase_ ( self ) -> Any: pass @unittest.skip(reason="MobileNetV1 does not output attentions" ) def UpperCamelCase_ ( self ) -> Any: pass def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Any = model_class(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def UpperCamelCase_ ( self ) -> Optional[Any]: _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def UpperCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states _SCREAMING_SNAKE_CASE : Optional[Any] = 2_6 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Union[str, Any] = True 
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : str = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def UpperCamelCase_ ( self ) -> Dict: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def lowerCamelCase__ (): _SCREAMING_SNAKE_CASE : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ) -> Tuple: return ( MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self ) -> str: _SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor _SCREAMING_SNAKE_CASE : str = prepare_img() _SCREAMING_SNAKE_CASE : List[str] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : int = model(**__lowerCamelCase ) # verify the logits _SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
369
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid-integers p^q * q^p, for distinct primes p and q, that do not
    exceed base^degree. The comparison is done in log2 space to avoid huge numbers."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
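# Hedged sanity check (an addition, not part of the original solution): the
# two-pointer loop above relies on log2(p**q * q**p) = q*log2(p) + p*log2(q),
# so the log-space comparison must agree with direct evaluation on small primes.
if __name__ == "__main__":
    for _p, _q in ((2, 3), (2, 5), (3, 5)):
        assert (_q * log2(_p) + _p * log2(_q) <= 10) == (_p**_q * _q**_p <= 2**10)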
325
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
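# Hedged extra check (an addition for illustration): an odd cycle can never be
# two-colored, so a triangle must be rejected by the same routine.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert check_bipartite_dfs(triangle) is False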
370
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either condition is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
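# Hedged sanity check (an addition): binomial coefficients satisfy Pascal's rule
# C(n, k) = C(n - 1, k - 1) + C(n - 1, k), which exercises combinations() cheaply.
if __name__ == "__main__":
    for _n, _k in ((5, 2), (10, 3), (52, 5)):
        assert combinations(_n, _k) == combinations(_n - 1, _k - 1) + combinations(_n - 1, _k)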
325
0
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
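# Added sketch (not part of the original): the slicing recursion above copies
# sublists, costing O(n) extra work per call. An index-based variant keeps the
# usual O(log n) comparisons without copying.
def binary_search_by_index(
    a_list: list[int], item: int, low: int = 0, high: int | None = None
) -> bool:
    if high is None:
        high = len(a_list) - 1
    if low > high:
        return False
    midpoint = (low + high) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search_by_index(a_list, item, low, midpoint - 1)
    return binary_search_by_index(a_list, item, midpoint + 1, high)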
371
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class lowerCAmelCase__( __lowercase , __lowercase ): '''simple docstring''' @register_to_config def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int: super().__init__() _SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential( nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , ) _SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList() for lyr_num in range(__lowerCamelCase ): # FiLM conditional T5 decoder _SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase ) self.decoders.append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: _SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to( torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase ) inputs += position_encodings _SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase ) # decoder: No padding present. _SCREAMING_SNAKE_CASE : Any = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks] # cross attend style: concat encodings _SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _SCREAMING_SNAKE_CASE : Optional[Any] = lyr( __lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0] _SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase ) return spec_out class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict: super().__init__() _SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : int = self.layer[0]( __lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , ) if encoder_hidden_states is not None: _SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _SCREAMING_SNAKE_CASE : Tuple = self.layer[1]( __lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , ) # Apply Film Conditional Feed Forward layer _SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase ) return (hidden_states,) class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]: # pre_self_attention_layer_norm _SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase ) if conditioning_emb is not None: _SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase ) # Self-attention block _SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = 
hidden_states + self.dropout(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: super().__init__() _SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]: _SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = self.attention( __lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase ) return layer_output class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]: _SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase ) if conditioning_emb is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation() def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any: _SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear _SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase ) return hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int: super().__init__() _SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : str = eps def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as 
Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class lowerCAmelCase__( nn.Module ): '''simple docstring''' def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) )) class lowerCAmelCase__( nn.Module ): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: super().__init__() _SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase ) def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict: _SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 ) _SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift return x
325
0
"""simple docstring""" import mpmath # for roots of unity import numpy as np class _a : """simple docstring""" def __init__( self : Any , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : str=None )->Tuple: # Input as list _UpperCAmelCase = list(poly_a or [0] )[:] _UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _UpperCAmelCase = self.__multiply() def lowercase__ ( self : List[str] , __UpperCamelCase : List[str] )->List[str]: _UpperCAmelCase = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(__UpperCamelCase ) <= 1: return dft[0] # _UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: _UpperCAmelCase = [[] for i in range(__UpperCamelCase )] _UpperCAmelCase = self.root**next_ncol # First half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__UpperCamelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__UpperCamelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _UpperCAmelCase = new_dft _UpperCAmelCase = next_ncol // 2 return dft[0] def lowercase__ ( self : Optional[int] )->Union[str, Any]: _UpperCAmelCase = self.__dft('''A''' ) _UpperCAmelCase = self.__dft('''B''' ) _UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _UpperCAmelCase = 2 while next_ncol <= self.c_max_length: _UpperCAmelCase = [[] for i in range(__UpperCamelCase )] _UpperCAmelCase = self.root ** (next_ncol // 2) _UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack _UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Tuple )->List[str]: _UpperCAmelCase = '''A = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _UpperCAmelCase = '''B = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _UpperCAmelCase = '''A*B = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product ) ) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
326
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = None UpperCamelCase__ = None __A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess") def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ): raise ValueError('''The nodes number should be same as the number of coins''' ) # Main calculation def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) _UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left ) _UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right ) _UpperCAmelCase = 1 - left_distrib_excess _UpperCAmelCase = 1 - right_distrib_excess _UpperCAmelCase = ( left_distrib_moves + right_distrib_moves + abs(_SCREAMING_SNAKE_CASE ) + abs(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return get_distrib(_SCREAMING_SNAKE_CASE )[0] if __name__ == "__main__": import doctest doctest.testmod()
326
1
"""simple docstring""" import requests __A : int = "YOUR API KEY" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = giphy_api_key ): '''simple docstring''' _UpperCAmelCase = '''+'''.join(query.split() ) _UpperCAmelCase = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}' _UpperCAmelCase = requests.get(_SCREAMING_SNAKE_CASE ).json()['''data'''] return [gif["url"] for gif in gifs] if __name__ == "__main__": print("\n".join(get_gifs("space ship")))
326
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]: _UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = 
type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple: _UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]: _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : 
Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]: _UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]: _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( 
self : List[str] )->Optional[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def lowercase__ ( self : List[Any] )->str: _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 ) def lowercase__ ( self : List[Any] )->List[str]: self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase ) def lowercase__ ( self : Any )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase ) def lowercase__ ( self : str )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase ) def lowercase__ ( self : Any )->List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase ) def lowercase__ ( self : Dict )->Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase ) def lowercase__ ( self : Any )->Optional[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase ) def lowercase__ ( self : List[str] )->Tuple: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase ) @slow def lowercase__ ( self : Tuple )->List[str]: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_tf class _a ( unittest.TestCase): """simple docstring""" @slow def lowercase__ ( self : str )->Dict: _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__UpperCamelCase )[0] _UpperCAmelCase = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , __UpperCamelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6], [-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7], [-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
326
1
"""simple docstring""" import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 0 while num > 0: _UpperCAmelCase = num % 8 _UpperCAmelCase = octal + (remainder * math.floor(math.pow(10 , _SCREAMING_SNAKE_CASE ) )) counter += 1 _UpperCAmelCase = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return f'0o{int(_SCREAMING_SNAKE_CASE )}' def lowercase ( ): '''simple docstring''' print('''\n2 in octal is:''' ) print(decimal_to_octal(2 ) ) # = 2 print('''\n8 in octal is:''' ) print(decimal_to_octal(8 ) ) # = 10 print('''\n65 in octal is:''' ) print(decimal_to_octal(65 ) ) # = 101 print('''\n216 in octal is:''' ) print(decimal_to_octal(216 ) ) # = 330 print('''\n512 in octal is:''' ) print(decimal_to_octal(512 ) ) # = 1000 print('''\n''' ) if __name__ == "__main__": main()
326
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(_SCREAMING_SNAKE_CASE ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
326
1
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class _a : """simple docstring""" UpperCamelCase__ = None def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) _UpperCAmelCase = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __UpperCamelCase ) def lowercase__ ( self : int )->Optional[int]: _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''feat_extract.json''' ) feat_extract_first.to_json_file(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowercase__ ( self : Any )->str: _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowercase__ ( self : Union[str, Any] )->Optional[Any]: _UpperCAmelCase = self.feature_extraction_class() self.assertIsNotNone(__UpperCamelCase )
326
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A : Tuple = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""") @require_torch @require_tf @slow class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Tuple: _UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )] if identifier is not None: _UpperCAmelCase = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__UpperCamelCase , __UpperCamelCase ): for n_ in n_identifier: _UpperCAmelCase = [file for file in files if n_ not in file] else: _UpperCAmelCase = [file for file in files if n_identifier not in file] _UpperCAmelCase = ignore_files or [] ignore_files.append('''__init__.py''' ) _UpperCAmelCase = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , __UpperCamelCase ) if only_modules: _UpperCAmelCase = file.split('''.''' )[0] try: _UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase ) _UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'{module_identifier} is not a module.' ) else: _UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowercase__ ( self : str )->int: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''modeling''' _UpperCAmelCase = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''tokenization''' self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase ) def lowercase__ ( self : str )->Any: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''configuration''' self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase ) def lowercase__ ( self : int )->Optional[Any]: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = Path('''docs/source''' ) _UpperCAmelCase = ['''favicon.ico'''] self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
326
1
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) < 2: return collection def circle_sort_util(_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> bool: _UpperCAmelCase = False if low == high: return swapped _UpperCAmelCase = low _UpperCAmelCase = high while left < right: if collection[left] > collection[right]: _UpperCAmelCase , _UpperCAmelCase = ( collection[right], collection[left], ) _UpperCAmelCase = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: _UpperCAmelCase , _UpperCAmelCase = ( collection[right + 1], collection[left], ) _UpperCAmelCase = True _UpperCAmelCase = low + int((high - low) / 2 ) _UpperCAmelCase = circle_sort_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = circle_sort_util(_SCREAMING_SNAKE_CASE , mid + 1 , _SCREAMING_SNAKE_CASE ) return swapped or left_swap or right_swap _UpperCAmelCase = True while is_not_sorted is True: _UpperCAmelCase = circle_sort_util(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 ) return collection if __name__ == "__main__": __A : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip() __A : Tuple = [int(item) for item in user_input.split(",")] print(circle_sort(unsorted))
326
"""simple docstring""" # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = None def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ): '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ): return math.exp(t * -12.0 ) else: raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' ) _UpperCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = i / num_diffusion_timesteps _UpperCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class _a ( lowerCAmelCase , lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 1 @register_to_config def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict: if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None: _UpperCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase ) _UpperCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
_UpperCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) _UpperCAmelCase = 1.0 - self.betas _UpperCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. _UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _UpperCAmelCase = 1.0 # setable values _UpperCAmelCase = None _UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) ) def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor: return sample def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:' F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle' F' maximal {self.config.num_train_timesteps} timesteps.' ) _UpperCAmelCase = num_inference_steps _UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa ) _UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase ) self.timesteps += self.config.steps_offset def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]: # 1. get previous step value (=t+1) _UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _UpperCAmelCase = self.alphas_cumprod[timestep] _UpperCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _UpperCAmelCase = 1 - alpha_prod_t # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _UpperCAmelCase = model_output elif self.config.prediction_type == "sample": _UpperCAmelCase = model_output _UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _UpperCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase ) def __len__( self : Any )->str: return self.config.num_train_timesteps
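# Editor's note: a minimal, de-obfuscated sketch of the "squaredcos_cap_v2" (Glide
# cosine) beta schedule computed by the helper above. The function name
# `make_cosine_betas` is illustrative, not part of the original file.
import math
import torch

def make_cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> torch.Tensor:
    # alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2; each beta comes from
    # the ratio of consecutive cumulative products, clipped at max_beta.
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = [
        min(1 - alpha_bar((i + 1) / num_diffusion_timesteps) / alpha_bar(i / num_diffusion_timesteps), max_beta)
        for i in range(num_diffusion_timesteps)
    ]
    return torch.tensor(betas, dtype=torch.float32)

# make_cosine_betas(1000) yields a slowly increasing schedule bounded by 0.999.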
326
1
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 # setable values UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = None @classmethod def lowercase__ ( cls : Tuple , __UpperCamelCase : CommonSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray )->int: return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase ) @dataclass class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 42 class _a ( lowerCAmelCase , lowerCAmelCase): """simple docstring""" UpperCamelCase__ = [e.name for e in FlaxKarrasDiffusionSchedulers] UpperCamelCase__ = 42 @property def lowercase__ ( self : str )->Any: return True @register_to_config def __init__( self : str , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[jnp.ndarray] = None , __UpperCamelCase : str = "fixed_small" , __UpperCamelCase : bool = True , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : jnp.dtype = jnp.floataa , )->int: _UpperCAmelCase = dtype def lowercase__ ( self : int , __UpperCamelCase : Optional[CommonSchedulerState] = None )->DDPMSchedulerState: if common is None: _UpperCAmelCase = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution _UpperCAmelCase = jnp.array(1.0 , dtype=self.dtype ) _UpperCAmelCase = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : DDPMSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Optional[int] = None )->jnp.ndarray: return sample def lowercase__ ( self : str , __UpperCamelCase : DDPMSchedulerState , __UpperCamelCase : int , __UpperCamelCase : Tuple = () )->DDPMSchedulerState: _UpperCAmelCase = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 _UpperCAmelCase = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , ) def lowercase__ ( self : str , __UpperCamelCase : DDPMSchedulerState , __UpperCamelCase : Dict , __UpperCamelCase : Any=None , __UpperCamelCase : Dict=None )->int: _UpperCAmelCase = state.common.alphas_cumprod[t] _UpperCAmelCase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _UpperCAmelCase = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: _UpperCAmelCase = self.config.variance_type # hacks - were probably 
added for training stability if variance_type == "fixed_small": _UpperCAmelCase = jnp.clip(__UpperCamelCase , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": _UpperCAmelCase = jnp.log(jnp.clip(__UpperCamelCase , a_min=1e-20 ) ) elif variance_type == "fixed_large": _UpperCAmelCase = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log _UpperCAmelCase = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": _UpperCAmelCase = variance _UpperCAmelCase = state.common.betas[t] _UpperCAmelCase = (predicted_variance + 1) / 2 _UpperCAmelCase = frac * max_log + (1 - frac) * min_log return variance def lowercase__ ( self : Optional[int] , __UpperCamelCase : DDPMSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : int , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Optional[jax.random.KeyArray] = None , __UpperCamelCase : bool = True , )->Union[FlaxDDPMSchedulerOutput, Tuple]: _UpperCAmelCase = timestep if key is None: _UpperCAmelCase = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: _UpperCAmelCase , _UpperCAmelCase = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 ) else: _UpperCAmelCase = None # 1. compute alphas, betas _UpperCAmelCase = state.common.alphas_cumprod[t] _UpperCAmelCase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) _UpperCAmelCase = 1 - alpha_prod_t _UpperCAmelCase = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _UpperCAmelCase = model_output elif self.config.prediction_type == "v_prediction": _UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' ''' for the FlaxDDPMScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: _UpperCAmelCase = jnp.clip(__UpperCamelCase , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _UpperCAmelCase = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t _UpperCAmelCase = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _UpperCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): _UpperCAmelCase = jax.random.split(__UpperCamelCase , num=1 ) _UpperCAmelCase = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise _UpperCAmelCase = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) _UpperCAmelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase ) def lowercase__ ( self : Any , __UpperCamelCase : DDPMSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , )->jnp.ndarray: return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Dict , __UpperCamelCase : DDPMSchedulerState , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , __UpperCamelCase : jnp.ndarray , )->jnp.ndarray: return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def __len__( self : Any )->Union[str, Any]: return self.config.num_train_timesteps
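# Editor's note: a small stand-alone sketch of the posterior-mean computation in
# the `step` method above (formula (7) of https://arxiv.org/pdf/2006.11239.pdf);
# the function name and argument layout are illustrative assumptions.
def ddpm_posterior_mean(sample, pred_x0, alpha_t, beta_t, alpha_prod_t, alpha_prod_t_prev):
    beta_prod_t = 1.0 - alpha_prod_t
    beta_prod_t_prev = 1.0 - alpha_prod_t_prev
    coeff_x0 = (alpha_prod_t_prev ** 0.5 * beta_t) / beta_prod_t  # weight on predicted x_0
    coeff_xt = (alpha_t ** 0.5) * beta_prod_t_prev / beta_prod_t  # weight on current x_t
    return coeff_x0 * pred_x0 + coeff_xt * sample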
326
"""simple docstring""" from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = int(number**0.5 ) return number == sq * sq def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _UpperCAmelCase = x_den * y_den * z_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) top //= hcf bottom //= hcf return top, bottom def lowercase ( _SCREAMING_SNAKE_CASE : int = 35 ): '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = 42 _UpperCAmelCase = Fraction(0 ) _UpperCAmelCase = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 _UpperCAmelCase = x_num * y_den + x_den * y_num _UpperCAmelCase = x_den * y_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 _UpperCAmelCase = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _UpperCAmelCase = x_den * x_den * y_den * y_den if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=-1 _UpperCAmelCase = x_num * y_num _UpperCAmelCase = x_den * y_num + x_num * y_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 _UpperCAmelCase = x_num * x_num * y_num * y_num _UpperCAmelCase = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) for num, den in unique_s: total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
326
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) _UpperCAmelCase = get_activation('''gelu''' ) self.assertTrue(torch.allclose(gelu_python(__UpperCamelCase ) , torch_builtin(__UpperCamelCase ) ) ) self.assertFalse(torch.allclose(gelu_python(__UpperCamelCase ) , gelu_new(__UpperCamelCase ) ) ) def lowercase__ ( self : List[Any] )->List[Any]: _UpperCAmelCase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) _UpperCAmelCase = get_activation('''gelu''' ) _UpperCAmelCase = get_activation('''gelu_10''' ) _UpperCAmelCase = torch_builtin(__UpperCamelCase ) _UpperCAmelCase = geluaa(__UpperCamelCase ) _UpperCAmelCase = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(__UpperCamelCase ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowercase__ ( self : Tuple )->int: get_activation('''gelu''' ) get_activation('''gelu_10''' ) get_activation('''gelu_fast''' ) get_activation('''gelu_new''' ) get_activation('''gelu_python''' ) get_activation('''gelu_pytorch_tanh''' ) get_activation('''linear''' ) get_activation('''mish''' ) get_activation('''quick_gelu''' ) get_activation('''relu''' ) get_activation('''sigmoid''' ) get_activation('''silu''' ) get_activation('''swish''' ) get_activation('''tanh''' ) with self.assertRaises(__UpperCamelCase ): get_activation('''bogus''' ) with self.assertRaises(__UpperCamelCase ): get_activation(__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Union[str, Any]: _UpperCAmelCase = get_activation('''gelu''' ) _UpperCAmelCase = 1 _UpperCAmelCase = get_activation('''gelu''' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__UpperCamelCase ): _UpperCAmelCase = acta.a
326
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' with open(_SCREAMING_SNAKE_CASE ) as metadata_file: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module'''] # Load the entity vocab file _UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE ) # add an entry for [MASK2] _UpperCAmelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''MLukeTokenizer''' with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0] _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0] _UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCAmelCase = state_dict[bias_name] _UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.' 
_UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCAmelCase = state_dict['''entity_predictions.bias'''] _UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) _UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): _UpperCAmelCase = state_dict[key] else: _UpperCAmelCase = state_dict[key] _UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}: raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' ) if set(_SCREAMING_SNAKE_CASE ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' ) _UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' _UpperCAmelCase = (0, 9) _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 33, 768) ) _UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 1, 768) ) _UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''Tokyo is the capital of <mask>.''' _UpperCAmelCase = (24, 30) _UpperCAmelCase = 
tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = encoding['''input_ids'''][0].tolist() _UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) _UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.entity_logits[0][0].argmax().item() _UpperCAmelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] _UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )] _UpperCAmelCase = {} for entry in data: _UpperCAmelCase = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCAmelCase = entity_id break _UpperCAmelCase = f'{language}:{entity_name}' _UpperCAmelCase = entity_id return new_mapping if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) __A : List[str] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
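# Editor's note: the conversion above repeatedly grows embedding/bias tensors by
# one row for the new special tokens; a minimal stand-alone sketch of that
# pattern (tensor shapes are illustrative).
import torch

word_emb = torch.randn(10, 4)           # (vocab_size, hidden_size)
new_row = word_emb[3].unsqueeze(0)      # initialize the new token from an existing row
word_emb = torch.cat([word_emb, new_row])
assert word_emb.shape == (11, 4)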
326
1
"""simple docstring""" import string import numpy def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return b if a == 0 else greatest_common_divisor(b % a , _SCREAMING_SNAKE_CASE ) class _a : """simple docstring""" UpperCamelCase__ = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) UpperCamelCase__ = numpy.vectorize(lambda lowerCAmelCase: x % 36) UpperCamelCase__ = numpy.vectorize(lowerCAmelCase) def __init__( self : List[Any] , __UpperCamelCase : numpy.ndarray )->None: _UpperCAmelCase = self.modulus(__UpperCamelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key _UpperCAmelCase = encrypt_key.shape[0] def lowercase__ ( self : Optional[Any] , __UpperCamelCase : str )->int: return self.key_string.index(__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : int )->str: return self.key_string[round(__UpperCamelCase )] def lowercase__ ( self : Dict )->None: _UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _UpperCAmelCase = det % len(self.key_string ) _UpperCAmelCase = len(self.key_string ) if greatest_common_divisor(__UpperCamelCase , len(self.key_string ) ) != 1: _UpperCAmelCase = ( F'determinant modular {req_l} of encryption key({det}) ' F'is not co prime w.r.t {req_l}.\nTry another key.' ) raise ValueError(__UpperCamelCase ) def lowercase__ ( self : List[Any] , __UpperCamelCase : str )->str: _UpperCAmelCase = [char for char in text.upper() if char in self.key_string] _UpperCAmelCase = chars[-1] while len(__UpperCamelCase ) % self.break_key != 0: chars.append(__UpperCamelCase ) return "".join(__UpperCamelCase ) def lowercase__ ( self : List[Any] , __UpperCamelCase : str )->str: _UpperCAmelCase = self.process_text(text.upper() ) _UpperCAmelCase = '''''' for i in range(0 , len(__UpperCamelCase ) - self.break_key + 1 , self.break_key ): _UpperCAmelCase = text[i : i + self.break_key] _UpperCAmelCase = [self.replace_letters(__UpperCamelCase ) for char in batch] _UpperCAmelCase = numpy.array([vec] ).T _UpperCAmelCase = self.modulus(self.encrypt_key.dot(__UpperCamelCase ) ).T.tolist()[ 0 ] _UpperCAmelCase = ''''''.join( self.replace_digits(__UpperCamelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def lowercase__ ( self : Any )->numpy.ndarray: _UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _UpperCAmelCase = det % len(self.key_string ) _UpperCAmelCase = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: _UpperCAmelCase = i break _UpperCAmelCase = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__UpperCamelCase ) ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : str )->str: _UpperCAmelCase = self.make_decrypt_key() _UpperCAmelCase = self.process_text(text.upper() ) _UpperCAmelCase = '''''' for i in range(0 , len(__UpperCamelCase ) - self.break_key + 1 , self.break_key ): _UpperCAmelCase = text[i : i + self.break_key] _UpperCAmelCase = [self.replace_letters(__UpperCamelCase ) for char in batch] _UpperCAmelCase = numpy.array([vec] ).T _UpperCAmelCase = self.modulus(decrypt_key.dot(__UpperCamelCase ) ).T.tolist()[0] _UpperCAmelCase = ''''''.join( self.replace_digits(__UpperCamelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return 
decrypted def lowercase ( ): '''simple docstring''' _UpperCAmelCase = int(input('''Enter the order of the encryption key: ''' ) ) _UpperCAmelCase = [] print('''Enter each row of the encryption key with space separated integers''' ) for _ in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for x in input().split()] hill_matrix.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) ) print('''Would you like to encrypt or decrypt some text? (1 or 2)''' ) _UpperCAmelCase = input('''\n1. Encrypt\n2. Decrypt\n''' ) if option == "1": _UpperCAmelCase = input('''What text would you like to encrypt?: ''' ) print('''Your encrypted text is:''' ) print(hc.encrypt(_SCREAMING_SNAKE_CASE ) ) elif option == "2": _UpperCAmelCase = input('''What text would you like to decrypt?: ''' ) print('''Your decrypted text is:''' ) print(hc.decrypt(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
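# Editor's note: a compact NumPy sketch of the core Hill-cipher step used by
# `encrypt` above -- multiply each block vector by the key matrix modulo the
# 36-symbol alphabet; the key and block below are illustrative.
import string
import numpy

ALPHABET = string.ascii_uppercase + string.digits  # the same 36 characters as key_string

def encrypt_block(block: str, key: numpy.ndarray) -> str:
    vec = numpy.array([[ALPHABET.index(ch)] for ch in block])
    out = key.dot(vec) % len(ALPHABET)
    return "".join(ALPHABET[int(n)] for n in out.T[0])

key = numpy.array([[2, 5], [1, 6]])  # det = 7, coprime with 36, so invertible mod 36
print(encrypt_block("HI", key))      # -> "ST"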
326
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu __A : Tuple = [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' _UpperCAmelCase = True while ask_again: _UpperCAmelCase = input(_SCREAMING_SNAKE_CASE ) try: if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0: return default return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result except Exception: if error_message is not None: print(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ): '''simple docstring''' _UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE ) return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class _a ( argparse.RawDescriptionHelpFormatter): """simple docstring""" def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]: _UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' ) return usage
326
1
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def lowercase ( _SCREAMING_SNAKE_CASE : Any ): # picklable for multiprocessing '''simple docstring''' return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def lowercase ( ): '''simple docstring''' with parallel_backend('''spark''' ): assert ParallelBackendConfig.backend_name == "spark" _UpperCAmelCase = [1, 2, 3] with pytest.raises(_SCREAMING_SNAKE_CASE ): with parallel_backend('''unsupported backend''' ): map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=2 ) with pytest.raises(_SCREAMING_SNAKE_CASE ): with parallel_backend('''unsupported backend''' ): map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize('''num_proc''' , [2, -1] ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' _UpperCAmelCase = [1, 2] _UpperCAmelCase = {'''a''': 1, '''b''': 2} _UpperCAmelCase = {'''a''': [1, 2], '''b''': [3, 4]} _UpperCAmelCase = {'''a''': {'''1''': 1}, '''b''': 2} _UpperCAmelCase = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} _UpperCAmelCase = [2, 3] _UpperCAmelCase = {'''a''': 2, '''b''': 3} _UpperCAmelCase = {'''a''': [2, 3], '''b''': [4, 5]} _UpperCAmelCase = {'''a''': {'''1''': 2}, '''b''': 3} _UpperCAmelCase = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} with parallel_backend('''spark''' ): assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
326
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' ) parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 ) parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 ) parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 ) parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE ) parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 ) parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 ) parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' ) parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 ) parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 ) parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' ) return parser.parse_args() __A : Union[str, Any] = load("accuracy") def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = eval_pred _UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE ) class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None: super().__init__() _UpperCAmelCase = trainer def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any: if control.should_evaluate: _UpperCAmelCase = deepcopy(__UpperCamelCase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' ) return control_copy def lowercase ( ): '''simple docstring''' _UpperCAmelCase = get_args() set_seed(args.seed ) _UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' ) _UpperCAmelCase = dataset.train_test_split(test_size=0.2 ) _UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 ) _UpperCAmelCase = DatasetDict( { '''train''': train_test['''train'''], '''test''': test_validation['''train'''], '''valid''': test_validation['''test'''], } ) print('''Loading tokenizer and model''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) _UpperCAmelCase = tokenizer.eos_token _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) _UpperCAmelCase = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _UpperCAmelCase = False _UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) ) def tokenize(_SCREAMING_SNAKE_CASE : Any ): _UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 ) _UpperCAmelCase = labels.straint(example['''complexity'''] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], 
"label": label, } _UpperCAmelCase = train_test_validation.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , ) _UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , ) _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , ) print('''Training...''' ) trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) ) trainer.train() if __name__ == "__main__": main()
326
1
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer __A : Any = ["gpt2"] __A : Optional[int] = "gpt2" if is_tf_available(): class _a ( tf.Module): """simple docstring""" def __init__( self : List[str] , __UpperCamelCase : Dict )->Any: super().__init__() _UpperCAmelCase = tokenizer _UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = TFGPTaLMHeadModel.from_config(__UpperCamelCase ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[Any] )->Optional[Any]: _UpperCAmelCase = self.tokenizer(__UpperCamelCase ) _UpperCAmelCase = tokenized['''input_ids'''].to_tensor() _UpperCAmelCase = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) _UpperCAmelCase = self.model(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )['''logits'''] return outputs @require_tf @require_keras_nlp class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Any )->List[str]: super().setUp() _UpperCAmelCase = [GPTaTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)] _UpperCAmelCase = [TFGPTaTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) _UpperCAmelCase = [ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] _UpperCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def lowercase__ ( self : Optional[int] )->List[Any]: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: _UpperCAmelCase = tokenizer([test_inputs] , return_tensors='''tf''' ) _UpperCAmelCase = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors _UpperCAmelCase = python_outputs[key].numpy() _UpperCAmelCase = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__UpperCamelCase , tf.intaa ) == tf_outputs_values ) ) @slow def lowercase__ ( self : Tuple )->int: for tf_tokenizer in self.tf_tokenizers: _UpperCAmelCase = tf.function(__UpperCamelCase ) for test_inputs in self.test_sentences: _UpperCAmelCase = tf.constant(__UpperCamelCase ) _UpperCAmelCase = compiled_tokenizer(__UpperCamelCase ) _UpperCAmelCase = tf_tokenizer(__UpperCamelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def lowercase__ ( self : List[Any] )->int: for tf_tokenizer in self.tf_tokenizers: _UpperCAmelCase = ModelToSave(tokenizer=__UpperCamelCase ) _UpperCAmelCase = 
tf.convert_to_tensor([self.test_sentences[0]] ) _UpperCAmelCase = model.serving(__UpperCamelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _UpperCAmelCase = Path(__UpperCamelCase ) / '''saved.model''' tf.saved_model.save(__UpperCamelCase , __UpperCamelCase , signatures={'''serving_default''': model.serving} ) _UpperCAmelCase = tf.saved_model.load(__UpperCamelCase ) _UpperCAmelCase = loaded_model.signatures['''serving_default'''](__UpperCamelCase )['''output_0'''] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def lowercase__ ( self : Any )->List[str]: for tf_tokenizer in self.tf_tokenizers: _UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] ) _UpperCAmelCase = tf_tokenizer(__UpperCamelCase ) # Build model with some sample inputs _UpperCAmelCase = tf_tokenizer.get_config() _UpperCAmelCase = TFGPTaTokenizer.from_config(__UpperCamelCase ) _UpperCAmelCase = model_from_config(__UpperCamelCase ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def lowercase__ ( self : str )->Union[str, Any]: for tf_tokenizer in self.tf_tokenizers: # for the test to run _UpperCAmelCase = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: _UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] ) _UpperCAmelCase = tf_tokenizer(__UpperCamelCase , max_length=__UpperCamelCase ) _UpperCAmelCase = out['''input_ids'''].numpy().shape[1] assert out_length == max_length
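# Editor's note: a toy version of the saved-model round trip exercised above,
# using a trivial tf.Module instead of the tokenizer+LM wrapper (a sketch under
# that simplification; names are illustrative).
import tensorflow as tf
from tempfile import TemporaryDirectory

class Doubler(tf.Module):
    @tf.function(input_signature=(tf.TensorSpec((None,), tf.float32, name="x"),))
    def serving(self, x):
        return x * 2.0

model = Doubler()
with TemporaryDirectory() as tempdir:
    tf.saved_model.save(model, tempdir, signatures={"serving_default": model.serving})
    loaded = tf.saved_model.load(tempdir)
    out = loaded.signatures["serving_default"](tf.constant([1.0, 2.0]))
    # `out` is a dict keyed by output name (e.g. "output_0")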
326
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return "\n".join( f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
326
1
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = OmegaConf.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model'''] _UpperCAmelCase = list(state_dict.keys() ) # extract state_dict for VQVAE _UpperCAmelCase = {} _UpperCAmelCase = '''first_stage_model.''' for key in keys: if key.startswith(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = state_dict[key] # extract state_dict for UNetLDM _UpperCAmelCase = {} _UpperCAmelCase = '''model.diffusion_model.''' for key in keys: if key.startswith(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = state_dict[key] _UpperCAmelCase = config.model.params.first_stage_config.params _UpperCAmelCase = config.model.params.unet_config.params _UpperCAmelCase = VQModel(**_SCREAMING_SNAKE_CASE ).eval() vqvae.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = UNetLDMModel(**_SCREAMING_SNAKE_CASE ).eval() unet.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = LDMPipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) pipeline.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", type=str, required=True) parser.add_argument("--config_path", type=str, required=True) parser.add_argument("--output_path", type=str, required=True) __A : Dict = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
326
"""simple docstring""" class _a : """simple docstring""" def __init__( self : Tuple , __UpperCamelCase : list[int] )->None: _UpperCAmelCase = len(__UpperCamelCase ) _UpperCAmelCase = [0] * len_array if len_array > 0: _UpperCAmelCase = array[0] for i in range(1 , __UpperCamelCase ): _UpperCAmelCase = self.prefix_sum[i - 1] + array[i] def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool: _UpperCAmelCase = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
326
1
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int = 1000 ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = 1, 1 _UpperCAmelCase = 2 while True: _UpperCAmelCase = 0 _UpperCAmelCase = fa + fa _UpperCAmelCase , _UpperCAmelCase = fa, f index += 1 for _ in str(_SCREAMING_SNAKE_CASE ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
326
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
1
"""simple docstring""" from __future__ import annotations from collections import deque class _a : """simple docstring""" def __init__( self : List[str] , __UpperCamelCase : list[str] )->int: _UpperCAmelCase = [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} ) for keyword in keywords: self.add_keyword(__UpperCamelCase ) self.set_fail_transitions() def lowercase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : str )->int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : str )->None: _UpperCAmelCase = 0 for character in keyword: _UpperCAmelCase = self.find_next_state(__UpperCamelCase , __UpperCamelCase ) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) _UpperCAmelCase = len(self.adlist ) - 1 else: _UpperCAmelCase = next_state self.adlist[current_state]["output"].append(__UpperCamelCase ) def lowercase__ ( self : Tuple )->None: _UpperCAmelCase = deque() for node in self.adlist[0]["next_states"]: q.append(__UpperCamelCase ) _UpperCAmelCase = 0 while q: _UpperCAmelCase = q.popleft() for child in self.adlist[r]["next_states"]: q.append(__UpperCamelCase ) _UpperCAmelCase = self.adlist[r]['''fail_state'''] while ( self.find_next_state(__UpperCamelCase , self.adlist[child]['''value'''] ) is None and state != 0 ): _UpperCAmelCase = self.adlist[state]['''fail_state'''] _UpperCAmelCase = self.find_next_state( __UpperCamelCase , self.adlist[child]['''value'''] ) if self.adlist[child]["fail_state"] is None: _UpperCAmelCase = 0 _UpperCAmelCase = ( self.adlist[child]['''output'''] + self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def lowercase__ ( self : int , __UpperCamelCase : str )->dict[str, list[int]]: _UpperCAmelCase = {} # returns a dict with keywords and list of its occurrences _UpperCAmelCase = 0 for i in range(len(__UpperCamelCase ) ): while ( self.find_next_state(__UpperCamelCase , string[i] ) is None and current_state != 0 ): _UpperCAmelCase = self.adlist[current_state]['''fail_state'''] _UpperCAmelCase = self.find_next_state(__UpperCamelCase , string[i] ) if next_state is None: _UpperCAmelCase = 0 else: _UpperCAmelCase = next_state for key in self.adlist[current_state]["output"]: if key not in result: _UpperCAmelCase = [] result[key].append(i - len(__UpperCamelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
326
"""simple docstring""" __A : Tuple = frozenset( [ "prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) __A : Union[str, Any] = frozenset(["prompt", "negative_prompt"]) __A : str = frozenset([]) __A : List[str] = frozenset(["image"]) __A : Optional[Any] = frozenset( [ "image", "height", "width", "guidance_scale", ] ) __A : Optional[int] = frozenset(["image"]) __A : Optional[int] = frozenset( [ "prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) __A : Optional[Any] = frozenset(["prompt", "image", "negative_prompt"]) __A : str = frozenset( [ # Text guided image variation with an image mask "prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) __A : Tuple = frozenset(["prompt", "image", "mask_image", "negative_prompt"]) __A : List[str] = frozenset( [ # image variation with an image mask "image", "mask_image", "height", "width", "guidance_scale", ] ) __A : List[Any] = frozenset(["image", "mask_image"]) __A : List[str] = frozenset( [ "example_image", "image", "mask_image", "height", "width", "guidance_scale", ] ) __A : Tuple = frozenset(["example_image", "image", "mask_image"]) __A : Dict = frozenset(["class_labels"]) __A : str = frozenset(["class_labels"]) __A : str = frozenset(["batch_size"]) __A : Union[str, Any] = frozenset([]) __A : str = frozenset(["batch_size"]) __A : Optional[int] = frozenset([]) __A : Any = frozenset( [ "prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) __A : List[str] = frozenset(["prompt", "negative_prompt"]) __A : Tuple = frozenset(["input_tokens"]) __A : Optional[int] = frozenset(["input_tokens"])
326
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : List[str] = { "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ["AlbertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["AlbertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = [ "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "AlbertForMaskedLM", "AlbertForMultipleChoice", "AlbertForPreTraining", "AlbertForQuestionAnswering", "AlbertForSequenceClassification", "AlbertForTokenClassification", "AlbertModel", "AlbertPreTrainedModel", "load_tf_weights_in_albert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", "TFAlbertForQuestionAnswering", "TFAlbertForSequenceClassification", "TFAlbertForTokenClassification", "TFAlbertMainLayer", "TFAlbertModel", "TFAlbertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = [ "FlaxAlbertForMaskedLM", "FlaxAlbertForMultipleChoice", "FlaxAlbertForPreTraining", "FlaxAlbertForQuestionAnswering", "FlaxAlbertForSequenceClassification", "FlaxAlbertForTokenClassification", "FlaxAlbertModel", "FlaxAlbertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, 
FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : Optional[Any] = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __A : str = logging.get_logger(__name__) __A : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A : Any = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } __A : Dict = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } __A : str = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = SqueezeBertTokenizer def __init__( self : Dict , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : int="[UNK]" , __UpperCamelCase : List[str]="[SEP]" , __UpperCamelCase : List[Any]="[PAD]" , __UpperCamelCase : Any="[CLS]" , __UpperCamelCase : int="[MASK]" , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[str] , )->Any: super().__init__( __UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , tokenize_chinese_chars=__UpperCamelCase , strip_accents=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __UpperCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __UpperCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __UpperCamelCase ) != tokenize_chinese_chars ): _UpperCAmelCase = getattr(__UpperCamelCase , normalizer_state.pop('''type''' ) ) _UpperCAmelCase = do_lower_case _UpperCAmelCase = strip_accents _UpperCAmelCase = tokenize_chinese_chars _UpperCAmelCase = normalizer_class(**__UpperCamelCase ) _UpperCAmelCase = do_lower_case def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None )->List[Any]: _UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : 
Optional[List[int]] = None )->List[int]: _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None )->Tuple[str]: _UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase )
326
"""simple docstring""" import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __A : Union[str, Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __A : Tuple = importlib.util.spec_from_file_location( "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __A : List[str] = spec.loader.load_module() __A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)") __A : List[str] = { "CLIPConfigMixin", "DecisionTransformerConfigMixin", "EncoderDecoderConfigMixin", "RagConfigMixin", "SpeechEncoderDecoderConfigMixin", "VisionEncoderDecoderConfigMixin", "VisionTextDualEncoderConfigMixin", } def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [] for config_class in list(CONFIG_MAPPING.values() ): _UpperCAmelCase = False # source code of `config_class` _UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` _UpperCAmelCase , _UpperCAmelCase = checkpoint # verify the checkpoint name corresponds to the checkpoint link _UpperCAmelCase = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: _UpperCAmelCase = True break _UpperCAmelCase = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
326
1
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu __A : Tuple = [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' _UpperCAmelCase = True while ask_again: _UpperCAmelCase = input(_SCREAMING_SNAKE_CASE ) try: if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0: return default return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result except Exception: if error_message is not None: print(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ): '''simple docstring''' _UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE ) return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class _a ( argparse.RawDescriptionHelpFormatter): """simple docstring""" def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]: _UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' ) return usage
326
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if bit_count < 0: raise ValueError('''The given input must be positive''' ) # get the generated string sequence _UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE ) # # convert them to integers for i in range(len(_SCREAMING_SNAKE_CASE ) ): _UpperCAmelCase = int(sequence[i] , 2 ) return sequence def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] _UpperCAmelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits _UpperCAmelCase = gray_code_sequence_string(bit_count - 1 ) _UpperCAmelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): _UpperCAmelCase = '''0''' + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): _UpperCAmelCase = '''1''' + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
326
1
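A short usage sketch for the Gray code generator above; it assumes the fixed `gray_code` name from the rewrite, and the expected outputs are the standard reflected Gray sequences (adjacent values differ by exactly one bit).

print(gray_code(2))  # [0, 1, 3, 2]
print(gray_code(3))  # [0, 1, 3, 2, 6, 7, 5, 4]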
"""simple docstring""" import datasets __A : List[str] = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n" __A : Tuple = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n" __A : Tuple = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n" def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a ( datasets.Metric): """simple docstring""" def lowercase__ ( self : Tuple )->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def lowercase__ ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple )->Dict: return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )}
326
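As a minimal sketch of what the XNLI metric actually computes, here is the same accuracy calculation in plain NumPy, independent of the `datasets` loading machinery:

import numpy as np

predictions = np.array([0, 1, 2, 1])
references = np.array([0, 1, 1, 1])
print({"accuracy": (predictions == references).mean()})  # {'accuracy': 0.75}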
"""simple docstring""" import math def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ): '''simple docstring''' _UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = i _UpperCAmelCase = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _UpperCAmelCase = array[temp_index - 1] temp_index -= 1 _UpperCAmelCase = temp_index_value return array def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap '''simple docstring''' _UpperCAmelCase = index _UpperCAmelCase = 2 * index + 1 # Left Node _UpperCAmelCase = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _UpperCAmelCase = left_index if right_index < heap_size and array[largest] < array[right_index]: _UpperCAmelCase = right_index if largest != index: _UpperCAmelCase , _UpperCAmelCase = array[largest], array[index] heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) for i in range(n // 2 , -1 , -1 ): heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for i in range(n - 1 , 0 , -1 ): _UpperCAmelCase , _UpperCAmelCase = array[0], array[i] heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE ) return array def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = low _UpperCAmelCase = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _UpperCAmelCase , _UpperCAmelCase = array[j], array[i] i += 1 def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) == 0: return array _UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) ) _UpperCAmelCase = 16 return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(_SCREAMING_SNAKE_CASE ) max_depth -= 1 _UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 ) _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = p return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() __A : List[str] = input("Enter 
numbers separated by a comma : ").strip() __A : Optional[Any] = [float(item) for item in user_input.split(",")] print(sort(unsorted))
326
1
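A usage sketch for the introsort implementation above (it assumes the fixed `sort` entry point). Inputs at or below the size threshold of 16 stay on the insertion-sort path:

print(sort([]))                         # []
print(sort([5.0, 4.0, 3.0, 2.0, 1.0]))  # [1.0, 2.0, 3.0, 4.0, 5.0]
print(sort([7, 3, 7, 3, 7]))            # [3, 3, 7, 7, 7]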
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return "\n".join( f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
326
"""simple docstring""" from __future__ import annotations import numpy as np def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE ) if rows != columns: _UpperCAmelCase = ( '''\'table\' has to be of square shaped array but got a ''' f'{rows}x{columns} array:\n{table}' ) raise ValueError(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros((rows, columns) ) _UpperCAmelCase = np.zeros((rows, columns) ) for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) ) if upper[j][j] == 0: raise ArithmeticError('''No LU decomposition exists''' ) _UpperCAmelCase = (table[i][j] - total) / upper[j][j] _UpperCAmelCase = 1 for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
326
1
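A usage sketch for the LU decomposition above (assuming the fixed `lower_upper_decomposition` name). The factors satisfy L @ U == A, with L carrying a unit diagonal; the sample matrix is chosen so all leading principal minors are nonzero and no pivoting is needed:

import numpy as np

matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
assert np.allclose(lower @ upper, matrix)  # round-trip check
print(lower)
print(upper)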
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A : Optional[int] = { "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"], "configuration_data2vec_text": [ "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecTextConfig", "Data2VecTextOnnxConfig", ], "configuration_data2vec_vision": [ "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecVisionConfig", "Data2VecVisionOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecAudioForAudioFrameClassification", "Data2VecAudioForCTC", "Data2VecAudioForSequenceClassification", "Data2VecAudioForXVector", "Data2VecAudioModel", "Data2VecAudioPreTrainedModel", ] __A : Optional[Any] = [ "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", "Data2VecTextForQuestionAnswering", "Data2VecTextForSequenceClassification", "Data2VecTextForTokenClassification", "Data2VecTextModel", "Data2VecTextPreTrainedModel", ] __A : Any = [ "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecVisionForImageClassification", "Data2VecVisionForMaskedImageModeling", "Data2VecVisionForSemanticSegmentation", "Data2VecVisionModel", "Data2VecVisionPreTrainedModel", ] if is_tf_available(): __A : Tuple = [ "TFData2VecVisionForImageClassification", "TFData2VecVisionForSemanticSegmentation", "TFData2VecVisionModel", "TFData2VecVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = CTRLTokenizer UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Dict )->str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] _UpperCAmelCase = {'''unk_token''': '''<unk>'''} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCamelCase ) ) def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = '''adapt react readapt apt''' _UpperCAmelCase = '''adapt react readapt apt''' return input_text, output_text def lowercase__ ( self : Dict )->Optional[int]: _UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCAmelCase = '''adapt react readapt apt''' _UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
326
1
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = BertTokenizer UpperCamelCase__ = BertTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = True UpperCamelCase__ = filter_non_english def lowercase__ ( self : List[Any] )->Any: super().setUp() _UpperCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : int )->Any: _UpperCAmelCase = '''UNwant\u00E9d,running''' _UpperCAmelCase = '''unwanted, running''' return input_text, output_text def lowercase__ ( self : Dict )->List[str]: _UpperCAmelCase = self.tokenizer_class(self.vocab_file ) _UpperCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__UpperCamelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def lowercase__ ( self : Any )->List[str]: if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = '''UNwant\u00E9d,running''' _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) # With lower casing _UpperCAmelCase = self.get_tokenizer(do_lower_case=__UpperCamelCase ) _UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=__UpperCamelCase ) _UpperCAmelCase = '''UNwant\u00E9d,running''' _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->Any: _UpperCAmelCase = BasicTokenizer() 
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def lowercase__ ( self : Optional[int] )->Optional[int]: _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowercase__ ( self : Dict )->int: _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def lowercase__ ( self : Optional[int] )->Tuple: _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowercase__ ( self : int )->Optional[int]: _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowercase__ ( self : List[str] )->int: _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def lowercase__ ( self : str )->Tuple: _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def lowercase__ ( self : Tuple )->str: _UpperCAmelCase = BasicTokenizer() _UpperCAmelCase = '''a\n\'ll !!to?\'d of, can\'t.''' _UpperCAmelCase = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.'''] self.assertListEqual(tokenizer.tokenize(__UpperCamelCase ) , __UpperCamelCase ) def lowercase__ ( self : str )->Dict: _UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] _UpperCAmelCase = {} for i, token in enumerate(__UpperCamelCase ): _UpperCAmelCase = i _UpperCAmelCase = WordpieceTokenizer(vocab=__UpperCamelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def lowercase__ ( self : int )->Dict: self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def lowercase__ ( self : Tuple )->Dict: self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def lowercase__ ( self : List[Any] )->Dict: self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def lowercase__ ( self : List[str] )->Any: _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__UpperCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(__UpperCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def lowercase__ ( self : Optional[int] )->Tuple: _UpperCAmelCase = self.tokenizer_class.from_pretrained('''bert-base-uncased''' ) _UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def lowercase__ ( self : Union[str, Any] )->List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' 
_UpperCAmelCase = tokenizer_r.encode_plus( __UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase , ) _UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(__UpperCamelCase , '''do_lower_case''' ) else False _UpperCAmelCase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def lowercase__ ( self : List[Any] )->Optional[int]: _UpperCAmelCase = ['''的''', '''人''', '''有'''] _UpperCAmelCase = ''''''.join(__UpperCamelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _UpperCAmelCase = True _UpperCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = tokenizer_p.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer_r.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase ) _UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = False _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = tokenizer_r.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer_p.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase ) _UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase ) # it is expected that only the first Chinese character is not preceded by "##". _UpperCAmelCase = [ F'##{token}' if idx != 0 else token for idx, token in enumerate(__UpperCamelCase ) ] self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
326
"""simple docstring""" import logging import os from .state import PartialState class _a ( logging.LoggerAdapter): """simple docstring""" @staticmethod def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int: if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) _UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase ) _UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase ) if self.isEnabledFor(__UpperCamelCase ): if self._should_log(__UpperCamelCase ): _UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) elif in_order: _UpperCAmelCase = PartialState() for i in range(state.num_processes ): if i == state.process_index: _UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) state.wait_for_everyone() def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ): '''simple docstring''' if log_level is None: _UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
326
1
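A usage sketch based on the public `accelerate.logging.get_logger` API that the adapter above backs. The shared state must be initialized (for example by constructing an `Accelerator`) before logging:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the shared PartialState
logger = get_logger(__name__, log_level="INFO")
logger.info("logged once, from the main process", main_process_only=True)
logger.info("logged from every rank, one rank at a time", main_process_only=False, in_order=True)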
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int = 100_0000 ): '''simple docstring''' _UpperCAmelCase = limit + 1 _UpperCAmelCase = [0] * limit for first_term in range(1 , _SCREAMING_SNAKE_CASE ): for n in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _UpperCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(f'''{solution() = }''')
326
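Why the solver above only scans multiples of `first_term`: with x = a + d, y = a, z = a - d in arithmetic progression, the left-hand side collapses to (a + d)^2 - a^2 - (a - d)^2 = a(4d - a), so a must divide n and d = (a + n/a) / 4 must be a positive integer smaller than a. A brute-force cross-check over (a, d) pairs that does not use that algebra in its inner test:

def brute_force_count(n: int) -> int:
    count = 0
    for a in range(1, 2 * n):  # a * (4d - a) = n forces a <= n
        for d in range(1, a):  # z = a - d must stay positive
            if a * (4 * d - a) == n:
                count += 1
    return count

print(brute_force_count(1155))  # 10, matching the Project Euler 135 statement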
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __A : List[Any] = logging.get_logger(__name__) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = ["""pixel_values"""] def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6} _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _UpperCAmelCase = get_size_dict(__UpperCamelCase ) _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray: _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}' ) _UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray: _UpperCAmelCase = get_size_dict(__UpperCamelCase ) return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray: return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray: return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]: _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(__UpperCamelCase ) _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] _UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
326
1
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): _UpperCAmelCase = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): _UpperCAmelCase = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: _UpperCAmelCase = subset[i - 1][j] if arr[i - 1] <= j: _UpperCAmelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
326
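A usage sketch for the subset-sum table above (assuming the fixed `is_sum_subset` name):

print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True, via 4 + 5
print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False, no subset reaches 30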
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __A : List[Any] = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
1
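A lazy-import sketch for the fixed `_import_structure` above: `transformers` resolves these names on first attribute access instead of importing the heavy backends eagerly. The tiny hyperparameters are hypothetical, chosen only to keep the sketch cheap to run:

from transformers import GPTNeoConfig, GPTNeoForCausalLM

config = GPTNeoConfig(num_layers=4, hidden_size=64, attention_types=[[["global", "local"], 2]])
model = GPTNeoForCausalLM(config)
print(model.config.num_layers)  # 4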
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A : List[Any] = logging.get_logger(__name__) __A : Any = { "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json", # See all DETR models at https://huggingface.co/models?filter=detr } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """detr""" UpperCamelCase__ = ["""past_key_values"""] UpperCamelCase__ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=3 , __UpperCamelCase : List[str]=1_0_0 , __UpperCamelCase : str=6 , __UpperCamelCase : Optional[Any]=2_0_4_8 , __UpperCamelCase : Tuple=8 , __UpperCamelCase : Any=6 , __UpperCamelCase : str=2_0_4_8 , __UpperCamelCase : Any=8 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Dict="relu" , __UpperCamelCase : Union[str, Any]=2_5_6 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1.0 , __UpperCamelCase : int=False , __UpperCamelCase : List[Any]="sine" , __UpperCamelCase : int="resnet50" , __UpperCamelCase : str=True , __UpperCamelCase : Dict=False , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Optional[Any]=5 , __UpperCamelCase : int=2 , __UpperCamelCase : str=1 , __UpperCamelCase : Dict=1 , __UpperCamelCase : int=5 , __UpperCamelCase : str=2 , __UpperCamelCase : Any=0.1 , **__UpperCamelCase : List[Any] , )->List[str]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) _UpperCAmelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = backbone_config.get('''model_type''' ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(__UpperCamelCase ) # set timm attributes to None _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None, None, None _UpperCAmelCase = use_timm_backbone _UpperCAmelCase = backbone_config _UpperCAmelCase = num_channels _UpperCAmelCase = num_queries _UpperCAmelCase = d_model _UpperCAmelCase = encoder_ffn_dim _UpperCAmelCase = encoder_layers _UpperCAmelCase = encoder_attention_heads _UpperCAmelCase = decoder_ffn_dim _UpperCAmelCase = decoder_layers _UpperCAmelCase = decoder_attention_heads _UpperCAmelCase = dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = activation_function _UpperCAmelCase = init_std _UpperCAmelCase = init_xavier_std _UpperCAmelCase = encoder_layerdrop _UpperCAmelCase = decoder_layerdrop _UpperCAmelCase = encoder_layers _UpperCAmelCase = auxiliary_loss _UpperCAmelCase = position_embedding_type _UpperCAmelCase = backbone _UpperCAmelCase = use_pretrained_backbone _UpperCAmelCase = dilation # Hungarian matcher _UpperCAmelCase = class_cost _UpperCAmelCase = bbox_cost _UpperCAmelCase = giou_cost # Loss coefficients _UpperCAmelCase = mask_loss_coefficient _UpperCAmelCase = dice_loss_coefficient _UpperCAmelCase = bbox_loss_coefficient _UpperCAmelCase = giou_loss_coefficient _UpperCAmelCase = eos_coefficient super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase ) @property def lowercase__ ( self : int )->int: return self.encoder_attention_heads @property def lowercase__ ( self : int )->int: return self.d_model @classmethod def lowercase__ ( cls : str , __UpperCamelCase : PretrainedConfig , **__UpperCamelCase : Dict )->str: return cls(backbone_config=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Optional[int] )->Dict[str, any]: _UpperCAmelCase = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : List[str] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def lowercase__ ( self : List[Any] )->float: return 1e-5 @property def lowercase__ ( self : List[Any] )->int: return 1_2
326
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = None UpperCamelCase__ = None __A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess") def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ): raise ValueError('''The nodes number should be same as the number of coins''' ) # Main calculation def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) _UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left ) _UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right ) _UpperCAmelCase = 1 - left_distrib_excess _UpperCAmelCase = 1 - right_distrib_excess _UpperCAmelCase = ( left_distrib_moves + right_distrib_moves + abs(_SCREAMING_SNAKE_CASE ) + abs(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return get_distrib(_SCREAMING_SNAKE_CASE )[0] if __name__ == "__main__": import doctest doctest.testmod()
326
1
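A usage sketch for `distribute_coins` above: a three-node tree holding three coins, all parked at the root, needs one move per child.

#      3
#     / \
#    0   0
root = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(root))  # 2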
"""simple docstring""" import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _a ( nn.Module): """simple docstring""" def __init__( self : List[Any] , __UpperCamelCase : nn.Module , __UpperCamelCase : int )->Dict: super().__init__() _UpperCAmelCase = module _UpperCAmelCase = nn.Sequential( nn.Linear(module.in_features , __UpperCamelCase , bias=__UpperCamelCase ) , nn.Linear(__UpperCamelCase , module.out_features , bias=__UpperCamelCase ) , ) _UpperCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__UpperCamelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Dict , *__UpperCamelCase : List[str] , **__UpperCamelCase : Union[str, Any] )->Any: return self.module(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) + self.adapter(__UpperCamelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _a ( unittest.TestCase): """simple docstring""" # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module UpperCamelCase__ = """bigscience/bloom-1b7""" # Constant values UpperCamelCase__ = 2.1_09_65_95_52_69_25_74 UpperCamelCase__ = """Hello my name is""" UpperCamelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""") EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""") EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""") UpperCamelCase__ = 10 def lowercase__ ( self : List[Any] )->Optional[Any]: # Models and tokenizer _UpperCAmelCase = AutoTokenizer.from_pretrained(self.model_name ) class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : Any )->Optional[int]: super().setUp() # Models and tokenizer _UpperCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) def lowercase__ ( self : Any )->Tuple: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : int )->Optional[Any]: _UpperCAmelCase = self.model_abit.config self.assertTrue(hasattr(__UpperCamelCase , '''quantization_config''' ) ) _UpperCAmelCase = config.to_dict() _UpperCAmelCase = config.to_diff_dict() _UpperCAmelCase = config.to_json_string() def lowercase__ ( self : Union[str, Any] )->Tuple: from bitsandbytes.nn import Paramsabit _UpperCAmelCase = self.model_fpaa.get_memory_footprint() _UpperCAmelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) _UpperCAmelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowercase__ ( self : Optional[int] )->Optional[Any]: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__UpperCamelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowercase__ ( self : Any )->Union[str, Any]: _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) _UpperCAmelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCamelCase ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Union[str, Any] )->str: _UpperCAmelCase = BitsAndBytesConfig() _UpperCAmelCase = True _UpperCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCamelCase , device_map='''auto''' ) _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) _UpperCAmelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCamelCase ) , self.EXPECTED_OUTPUTS ) def lowercase__ ( self : Optional[int] )->Union[str, Any]: with self.assertRaises(__UpperCamelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__UpperCamelCase ) def lowercase__ ( self : List[str] )->str: _UpperCAmelCase = BitsAndBytesConfig() with self.assertRaises(__UpperCamelCase ): _UpperCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__UpperCamelCase , load_in_abit=__UpperCamelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def lowercase__ ( self : List[str] )->Tuple: with self.assertRaises(__UpperCamelCase ): # Tries with `str` 
self.model_abit.to('''cpu''' ) with self.assertRaises(__UpperCamelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__UpperCamelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__UpperCamelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__UpperCamelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) _UpperCAmelCase = self.model_fpaa.to(torch.floataa ) _UpperCAmelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 ) # Check this does not throw an error _UpperCAmelCase = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error _UpperCAmelCase = self.model_fpaa.half() # Check this does not throw an error _UpperCAmelCase = self.model_fpaa.float() def lowercase__ ( self : str )->Tuple: _UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCamelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _a ( unittest.TestCase): """simple docstring""" @classmethod def lowercase__ ( cls : Optional[Any] )->Optional[int]: _UpperCAmelCase = '''t5-small''' _UpperCAmelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense _UpperCAmelCase = AutoTokenizer.from_pretrained(cls.model_name ) _UpperCAmelCase = '''Translate in German: Hello, my dog is cute''' def lowercase__ ( self : Union[str, Any] )->Union[str, Any]: gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[Any] )->Any: from transformers import TaForConditionalGeneration _UpperCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules _UpperCAmelCase = None # test with `t5-small` _UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) _UpperCAmelCase = model.generate(**__UpperCamelCase ) # test with `flan-t5-small` _UpperCAmelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) _UpperCAmelCase = model.generate(**__UpperCamelCase ) _UpperCAmelCase = modules def lowercase__ ( self : Union[str, Any] )->List[str]: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` _UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) _UpperCAmelCase = model.generate(**__UpperCamelCase ) # test with `flan-t5-small` _UpperCAmelCase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) _UpperCAmelCase = model.generate(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple 
docstring""" def lowercase__ ( self : str )->Tuple: super().setUp() # model_name _UpperCAmelCase = '''bigscience/bloom-560m''' _UpperCAmelCase = '''t5-small''' # Different types of model _UpperCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) # Sequence classification model _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) # CausalLM model _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) # Seq2seq model _UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__UpperCamelCase , device_map='''auto''' ) def lowercase__ ( self : int )->List[Any]: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] )->Any: from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : Tuple )->Union[str, Any]: super().setUp() def lowercase__ ( self : str )->Optional[int]: del self.pipe gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[Any] )->List[str]: _UpperCAmelCase = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass _UpperCAmelCase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : Optional[int] )->str: super().setUp() def lowercase__ ( self : List[str] )->str: _UpperCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__UpperCamelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model _UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch _UpperCAmelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCamelCase ) , self.EXPECTED_OUTPUTS ) class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] )->List[Any]: _UpperCAmelCase = '''facebook/opt-350m''' super().setUp() def lowercase__ ( self : List[Any] )->str: if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCamelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): _UpperCAmelCase = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability _UpperCAmelCase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__UpperCamelCase ) ): _UpperCAmelCase = LoRALayer(module.q_proj , rank=1_6 ) _UpperCAmelCase = LoRALayer(module.k_proj , rank=1_6 ) _UpperCAmelCase = LoRALayer(module.v_proj , rank=1_6 ) # Step 3: dummy batch _UpperCAmelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): _UpperCAmelCase = model.forward(**__UpperCamelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__UpperCamelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """gpt2-xl""" UpperCamelCase__ = 3.31_91_85_48_54_15_21_87
326
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]: _UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = 
type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple: _UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]: _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : 
Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]: _UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]: _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( 
self : List[str] )->Optional[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def lowercase__ ( self : List[Any] )->str: _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 ) def lowercase__ ( self : List[Any] )->List[str]: self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase ) def lowercase__ ( self : Any )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase ) def lowercase__ ( self : str )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase ) def lowercase__ ( self : Any )->List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase ) def lowercase__ ( self : Dict )->Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase ) def lowercase__ ( self : Any )->Optional[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase ) def lowercase__ ( self : List[str] )->Tuple: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase ) @slow def lowercase__ ( self : Tuple )->List[str]: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_tf class _a ( unittest.TestCase): """simple docstring""" @slow def lowercase__ ( self : str )->Dict: _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__UpperCamelCase )[0] _UpperCAmelCase = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , __UpperCamelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6], [-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7], [-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
326
1
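The LoRALayer used in the quantized-training test above follows a standard pattern: freeze the base projection and add a trainable low-rank bypass initialized to zero so training starts from the original behavior. A minimal standalone sketch; the class name is illustrative and the initialization is simplified relative to the test's variance-scaled init, while the rank-16 choice mirrors the test:

import torch
import torch.nn as nn

class LoRAAdapter(nn.Module):
    """Wrap a frozen nn.Linear with a trainable low-rank bypass."""

    def __init__(self, base: nn.Linear, rank: int = 16):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad = False  # freeze the original projection
        self.adapter = nn.Sequential(
            nn.Linear(base.in_features, rank, bias=False),
            nn.Linear(rank, base.out_features, bias=False),
        )
        nn.init.normal_(self.adapter[0].weight, std=1.0 / rank)
        nn.init.zeros_(self.adapter[1].weight)  # bypass starts as a no-op

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.adapter(x)

layer = LoRAAdapter(nn.Linear(32, 32))
out = layer(torch.randn(4, 32))  # only the adapter receives gradients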
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
326
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(_SCREAMING_SNAKE_CASE ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
326
1
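A quick check of the set-bit counter above, paired with the equivalent Brian Kernighan loop for comparison; the loop variant is an illustration, not part of the file:

def popcount_kernighan(a: int) -> int:
    # Clears the lowest set bit each iteration, so it loops once per set bit.
    count = 0
    while a:
        a &= a - 1
        count += 1
    return count

assert bin(25).count("1") == popcount_kernighan(25) == 3  # 25 = 0b11001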
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __A : List[Any] = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A : Tuple = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""") @require_torch @require_tf @slow class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Tuple: _UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )] if identifier is not None: _UpperCAmelCase = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__UpperCamelCase , __UpperCamelCase ): for n_ in n_identifier: _UpperCAmelCase = [file for file in files if n_ not in file] else: _UpperCAmelCase = [file for file in files if n_identifier not in file] _UpperCAmelCase = ignore_files or [] ignore_files.append('''__init__.py''' ) _UpperCAmelCase = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , __UpperCamelCase ) if only_modules: _UpperCAmelCase = file.split('''.''' )[0] try: _UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase ) _UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'{module_identifier} is not a module.' ) else: _UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowercase__ ( self : str )->int: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''modeling''' _UpperCAmelCase = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''tokenization''' self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase ) def lowercase__ ( self : str )->Any: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''configuration''' self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase ) def lowercase__ ( self : int )->Optional[Any]: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = Path('''docs/source''' ) _UpperCAmelCase = ['''favicon.ico'''] self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
326
1
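The directory analyzer above mixes two doctest entry points. A minimal sketch of each, where `mypkg.mymodule` and `examples.txt` are placeholders for a module and a text file that actually contain doctests:

import doctest
import unittest

import mypkg.mymodule as mod  # placeholder: any module with doctests

# Collect doctests from a module's docstrings and run them under unittest.
suite = doctest.DocTestSuite(mod)
result = unittest.TextTestRunner().run(suite)
assert len(result.failures) == 0

# Or run the doctests embedded in a standalone text file.
file_result = doctest.testfile("examples.txt", optionflags=doctest.ELLIPSIS)
assert file_result.failed == 0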
"""simple docstring""" import argparse import datetime def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = { '''0''': '''Sunday''', '''1''': '''Monday''', '''2''': '''Tuesday''', '''3''': '''Wednesday''', '''4''': '''Thursday''', '''5''': '''Friday''', '''6''': '''Saturday''', } _UpperCAmelCase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(_SCREAMING_SNAKE_CASE ) < 11: raise ValueError('''Must be 10 characters long''' ) # Get month _UpperCAmelCase = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('''Month must be between 1 - 12''' ) _UpperCAmelCase = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get day _UpperCAmelCase = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('''Date must be between 1 - 31''' ) # Get second separator _UpperCAmelCase = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''' ) # Get year _UpperCAmelCase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( '''Year out of range. There has to be some sort of limit...right?''' ) # Get datetime obj for validation _UpperCAmelCase = datetime.date(int(_SCREAMING_SNAKE_CASE ) , int(_SCREAMING_SNAKE_CASE ) , int(_SCREAMING_SNAKE_CASE ) ) # Start math if m <= 2: _UpperCAmelCase = y - 1 _UpperCAmelCase = m + 12 # maths var _UpperCAmelCase = int(str(_SCREAMING_SNAKE_CASE )[:2] ) _UpperCAmelCase = int(str(_SCREAMING_SNAKE_CASE )[2:] ) _UpperCAmelCase = int(2.6 * m - 5.39 ) _UpperCAmelCase = int(c / 4 ) _UpperCAmelCase = int(k / 4 ) _UpperCAmelCase = int(d + k ) _UpperCAmelCase = int(t + u + v + x ) _UpperCAmelCase = int(z - (2 * c) ) _UpperCAmelCase = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' ) # Response _UpperCAmelCase = f'Your date {date_input}, is a {days[str(_SCREAMING_SNAKE_CASE )]}!' return response if __name__ == "__main__": import doctest doctest.testmod() __A : Union[str, Any] = argparse.ArgumentParser( description=( "Find out what day of the week nearly any date is or was. Enter " "date as a string in the mm-dd-yyyy or mm/dd/yyyy format" ) ) parser.add_argument( "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)" ) __A : Optional[Any] = parser.parse_args() zeller(args.date_input)
326
"""simple docstring""" # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = None def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ): '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ): return math.exp(t * -12.0 ) else: raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' ) _UpperCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = i / num_diffusion_timesteps _UpperCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class _a ( lowerCAmelCase , lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 1 @register_to_config def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict: if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None: _UpperCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase ) _UpperCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
_UpperCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) _UpperCAmelCase = 1.0 - self.betas _UpperCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. _UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _UpperCAmelCase = 1.0 # setable values _UpperCAmelCase = None _UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) ) def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor: return sample def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:' F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle' F' maximal {self.config.num_train_timesteps} timesteps.' ) _UpperCAmelCase = num_inference_steps _UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa ) _UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase ) self.timesteps += self.config.steps_offset def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]: # 1. get previous step value (=t+1) _UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _UpperCAmelCase = self.alphas_cumprod[timestep] _UpperCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _UpperCAmelCase = 1 - alpha_prod_t # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _UpperCAmelCase = model_output elif self.config.prediction_type == "sample": _UpperCAmelCase = model_output _UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _UpperCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase ) def __len__( self : Any )->str: return self.config.num_train_timesteps
326
1
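A quick sanity check of the Zeller routine defined earlier in this record; 2010-01-31 was a Sunday, which datetime confirms independently:

import datetime

print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!
assert datetime.date(2010, 1, 31).weekday() == 6  # Monday is 0, so 6 is Sunday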
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
326
"""simple docstring""" from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = int(number**0.5 ) return number == sq * sq def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _UpperCAmelCase = x_den * y_den * z_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) top //= hcf bottom //= hcf return top, bottom def lowercase ( _SCREAMING_SNAKE_CASE : int = 35 ): '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = 42 _UpperCAmelCase = Fraction(0 ) _UpperCAmelCase = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 _UpperCAmelCase = x_num * y_den + x_den * y_num _UpperCAmelCase = x_den * y_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 _UpperCAmelCase = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _UpperCAmelCase = x_den * x_den * y_den * y_den if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=-1 _UpperCAmelCase = x_num * y_num _UpperCAmelCase = x_den * y_num + x_num * y_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 _UpperCAmelCase = x_num * x_num * y_num * y_num _UpperCAmelCase = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) for num, den in unique_s: total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
326
1
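A small worked check of add_three above: after the gcd reduction step, three thirds collapse to 1/1, as does 1/2 + 1/3 + 1/6:

assert add_three(1, 3, 1, 3, 1, 3) == (1, 1)  # 1/3 + 1/3 + 1/3 == 1
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)  # 1/2 + 1/3 + 1/6 == 1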
"""simple docstring""" import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : Any )->int: _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = 8 # DPR tok _UpperCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _UpperCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) _UpperCAmelCase = os.path.join(__UpperCamelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok _UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _UpperCAmelCase = {'''unk_token''': '''<unk>'''} _UpperCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) _UpperCAmelCase = os.path.join(__UpperCamelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase = os.path.join(__UpperCamelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCamelCase ) ) def lowercase__ ( self : Tuple )->DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def lowercase__ ( self : Dict )->BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def lowercase__ ( self : Tuple )->List[Any]: shutil.rmtree(self.tmpdirname ) @require_tokenizers def lowercase__ ( self : Union[str, Any] )->int: _UpperCAmelCase = os.path.join(self.tmpdirname , '''rag_tokenizer''' ) _UpperCAmelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) _UpperCAmelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) 
rag_config.save_pretrained(__UpperCamelCase ) rag_tokenizer.save_pretrained(__UpperCamelCase ) _UpperCAmelCase = RagTokenizer.from_pretrained(__UpperCamelCase , config=__UpperCamelCase ) self.assertIsInstance(new_rag_tokenizer.question_encoder , __UpperCamelCase ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , __UpperCamelCase ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def lowercase__ ( self : Dict )->Optional[int]: _UpperCAmelCase = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' ) _UpperCAmelCase = [ '''who got the first nobel prize in physics''', '''when is the next deadpool movie being released''', '''which mode is used for short wave broadcast service''', '''who is the owner of reading football club''', '''when is the next scandal episode coming out''', '''when is the last time the philadelphia won the superbowl''', '''what is the most current adobe flash player version''', '''how many episodes are there in dragon ball z''', '''what is the first step in the evolution of the eye''', '''where is gall bladder situated in human body''', '''what is the main mineral in lithium batteries''', '''who is the president of usa right now''', '''where do the greasers live in the outsiders''', '''panda is a national animal of which country''', '''what is the name of manchester united stadium''', ] _UpperCAmelCase = tokenizer(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @slow def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' ) _UpperCAmelCase = [ '''who got the first nobel prize in physics''', '''when is the next deadpool movie being released''', '''which mode is used for short wave broadcast service''', '''who is the owner of reading football club''', '''when is the next scandal episode coming out''', '''when is the last time the philadelphia won the superbowl''', '''what is the most current adobe flash player version''', '''how many episodes are there in dragon ball z''', '''what is the first step in the evolution of the eye''', '''where is gall bladder situated in human body''', '''what is the main mineral in lithium batteries''', '''who is the president of usa right now''', '''where do the greasers live in the outsiders''', '''panda is a national animal of which country''', '''what is the name of manchester united stadium''', ] _UpperCAmelCase = tokenizer(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
326
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' with open(_SCREAMING_SNAKE_CASE ) as metadata_file: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module'''] # Load the entity vocab file _UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE ) # add an entry for [MASK2] _UpperCAmelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''MLukeTokenizer''' with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0] _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0] _UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCAmelCase = state_dict[bias_name] _UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.' 
_UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCAmelCase = state_dict['''entity_predictions.bias'''] _UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) _UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): _UpperCAmelCase = state_dict[key] else: _UpperCAmelCase = state_dict[key] _UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}: raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' ) if set(_SCREAMING_SNAKE_CASE ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' ) _UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' _UpperCAmelCase = (0, 9) _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 33, 768) ) _UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 1, 768) ) _UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''Tokyo is the capital of <mask>.''' _UpperCAmelCase = (24, 30) _UpperCAmelCase = 
tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = encoding['''input_ids'''][0].tolist() _UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) _UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.entity_logits[0][0].argmax().item() _UpperCAmelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] _UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )] _UpperCAmelCase = {} for entry in data: _UpperCAmelCase = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCAmelCase = entity_id break _UpperCAmelCase = f'{language}:{entity_name}' _UpperCAmelCase = entity_id return new_mapping if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) __A : List[str] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
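# The core move in the conversion above is extending pretrained weight
# matrices with extra rows for newly added tokens. A standalone sketch of
# that step with toy sizes (names here are illustrative, not the script's):
import torch

word_emb = torch.randn(10, 4)        # pretend vocab of 10, hidden size 4
ent_row = word_emb[3].unsqueeze(0)   # seed <ent> from an existing token's row
enta_row = word_emb[7].unsqueeze(0)  # seed <ent2> from another token's row
extended = torch.cat([word_emb, ent_row, enta_row])
assert extended.shape == (12, 4)     # two extra rows, hidden size unchanged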
"""simple docstring""" from __future__ import annotations def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : list[str] | None = None ): '''simple docstring''' _UpperCAmelCase = word_bank or [] # create a table _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) + 1 _UpperCAmelCase = [] for _ in range(_SCREAMING_SNAKE_CASE ): table.append([] ) # seed value _UpperCAmelCase = [[]] # because empty string has empty combination # iterate through the indices for i in range(_SCREAMING_SNAKE_CASE ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_SCREAMING_SNAKE_CASE )] == word: _UpperCAmelCase = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_SCREAMING_SNAKE_CASE )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_SCREAMING_SNAKE_CASE )]: combination.reverse() return table[len(_SCREAMING_SNAKE_CASE )] if __name__ == "__main__": print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"])) print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"])) print( all_construct( "hexagonosaurus", ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"], ) )
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu __A : Tuple = [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' _UpperCAmelCase = True while ask_again: _UpperCAmelCase = input(_SCREAMING_SNAKE_CASE ) try: if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0: return default return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result except Exception: if error_message is not None: print(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ): '''simple docstring''' _UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE ) return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class _a ( argparse.RawDescriptionHelpFormatter): """simple docstring""" def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]: _UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' ) return usage
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _a ( unittest.TestCase): """simple docstring""" UpperCamelCase__ = ViTImageProcessor if is_vision_available() else None @property def lowercase__ ( self : int )->Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self : List[str] )->Optional[Any]: _UpperCAmelCase = (3, 3_2, 1_2_8) _UpperCAmelCase = tempfile.mkdtemp() # fmt: off _UpperCAmelCase = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '''\n''' ) _UpperCAmelCase = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 3_2, '''width''': 1_2_8}, } _UpperCAmelCase = os.path.join(self.tmpdirname , __UpperCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] , **__UpperCamelCase : Dict )->List[Any]: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , **__UpperCamelCase : int )->Any: return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Optional[int]: shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : Optional[Any] )->List[str]: _UpperCAmelCase = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta ) _UpperCAmelCase = Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) return image_input def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCamelCase ) def lowercase__ ( self : str )->List[str]: _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , 
image_processor=__UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCAmelCase = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 ) _UpperCAmelCase = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCamelCase ) def lowercase__ ( self : Optional[int] )->Optional[int]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(__UpperCamelCase , return_tensors='''np''' ) _UpperCAmelCase = processor(images=__UpperCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase__ ( self : List[Any] )->Any: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = '''test''' _UpperCAmelCase = processor(text=__UpperCamelCase ) _UpperCAmelCase = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase__ ( self : Optional[Any] )->Tuple: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = '''test''' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def lowercase__ ( self : str )->List[Any]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.char_decode(__UpperCamelCase ) _UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase ) _UpperCAmelCase = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok] self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = None _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase 
, image_processor=__UpperCamelCase ) _UpperCAmelCase = torch.randn(1 , 2_7 , 3_8 ) _UpperCAmelCase = torch.randn(1 , 2_7 , 5_0_2_5_7 ) _UpperCAmelCase = torch.randn(1 , 2_7 , 3_0_5_2_2 ) _UpperCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' ) parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 ) parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 ) parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 ) parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE ) parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 ) parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 ) parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' ) parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 ) parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 ) parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' ) return parser.parse_args() __A : Union[str, Any] = load("accuracy") def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = eval_pred _UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE ) class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None: super().__init__() _UpperCAmelCase = trainer def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any: if control.should_evaluate: _UpperCAmelCase = deepcopy(__UpperCamelCase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' ) return control_copy def lowercase ( ): '''simple docstring''' _UpperCAmelCase = get_args() set_seed(args.seed ) _UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' ) _UpperCAmelCase = dataset.train_test_split(test_size=0.2 ) _UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 ) _UpperCAmelCase = DatasetDict( { '''train''': train_test['''train'''], '''test''': test_validation['''train'''], '''valid''': test_validation['''test'''], } ) print('''Loading tokenizer and model''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) _UpperCAmelCase = tokenizer.eos_token _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) _UpperCAmelCase = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _UpperCAmelCase = False _UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) ) def tokenize(_SCREAMING_SNAKE_CASE : Any ): _UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 ) _UpperCAmelCase = labels.straint(example['''complexity'''] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], 
"label": label, } _UpperCAmelCase = train_test_validation.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , ) _UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , ) _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , ) print('''Training...''' ) trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) ) trainer.train() if __name__ == "__main__": main()
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A : List[str] = TypeVar("KEY") __A : List[Any] = TypeVar("VAL") @dataclass(frozen=lowerCAmelCase , slots=lowerCAmelCase) class _a ( Generic[KEY, VAL]): """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 class _a ( _Item): """simple docstring""" def __init__( self : List[Any] )->None: super().__init__(__UpperCamelCase , __UpperCamelCase ) def __bool__( self : Any )->bool: return False __A : Union[str, Any] = _DeletedItem() class _a ( MutableMapping[KEY, VAL]): """simple docstring""" def __init__( self : int , __UpperCamelCase : int = 8 , __UpperCamelCase : float = 0.7_5 )->None: _UpperCAmelCase = initial_block_size _UpperCAmelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 _UpperCAmelCase = capacity_factor _UpperCAmelCase = 0 def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : KEY )->int: return hash(__UpperCamelCase ) % len(self._buckets ) def lowercase__ ( self : Any , __UpperCamelCase : int )->int: return (ind + 1) % len(self._buckets ) def lowercase__ ( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : KEY , __UpperCamelCase : VAL )->bool: _UpperCAmelCase = self._buckets[ind] if not stored: _UpperCAmelCase = _Item(__UpperCamelCase , __UpperCamelCase ) self._len += 1 return True elif stored.key == key: _UpperCAmelCase = _Item(__UpperCamelCase , __UpperCamelCase ) return True else: return False def lowercase__ ( self : Optional[Any] )->bool: _UpperCAmelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCamelCase ) def lowercase__ ( self : Tuple )->bool: if len(self._buckets ) <= self._initial_block_size: return False _UpperCAmelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->None: _UpperCAmelCase = self._buckets _UpperCAmelCase = [None] * new_size _UpperCAmelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def lowercase__ ( self : Any )->None: self._resize(len(self._buckets ) * 2 ) def lowercase__ ( self : List[Any] )->None: self._resize(len(self._buckets ) // 2 ) def lowercase__ ( self : Tuple , __UpperCamelCase : KEY )->Iterator[int]: _UpperCAmelCase = self._get_bucket_index(__UpperCamelCase ) for _ in range(len(self._buckets ) ): yield ind _UpperCAmelCase = self._get_next_ind(__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : KEY , __UpperCamelCase : VAL )->None: for ind in self._iterate_buckets(__UpperCamelCase ): if self._try_set(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): break def __setitem__( self : Any , __UpperCamelCase : KEY , __UpperCamelCase : VAL )->None: if self._is_full(): self._size_up() self._add_item(__UpperCamelCase , __UpperCamelCase ) def __delitem__( self : Any , __UpperCamelCase : KEY )->None: for ind in self._iterate_buckets(__UpperCamelCase ): _UpperCAmelCase = self._buckets[ind] if item is None: raise KeyError(__UpperCamelCase ) if item is _deleted: continue if item.key == key: _UpperCAmelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Dict , __UpperCamelCase : KEY )->VAL: for ind in self._iterate_buckets(__UpperCamelCase ): _UpperCAmelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCamelCase ) def __len__( self : 
List[Any] )->int: return self._len def __iter__( self : int )->Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self : Union[str, Any] )->str: _UpperCAmelCase = ''' ,'''.join( F'{item.key}: {item.val}' for item in self._buckets if item ) return F'HashMap({val_string})'
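# A quick usage sketch for the HashMap above (illustrative session):
hm = HashMap()
for i in range(20):
    hm[i] = i * 10           # triggers a few _size_up() resizes along the way
assert hm[7] == 70
del hm[7]                    # tombstoned with _deleted, not shifted in place
assert len(hm) == 19
assert 7 not in hm           # MutableMapping gives us __contains__ for free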
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return "\n".join( f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
"""simple docstring""" import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = FunnelTokenizer UpperCamelCase__ = FunnelTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = True def lowercase__ ( self : Optional[Any] )->str: super().setUp() _UpperCAmelCase = [ '''<unk>''', '''<cls>''', '''<sep>''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowercase__ ( self : Dict , **__UpperCamelCase : Tuple )->Optional[int]: return FunnelTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowercase__ ( self : str , **__UpperCamelCase : Optional[int] )->Tuple: return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowercase__ ( self : int , __UpperCamelCase : Optional[Any] )->Tuple: _UpperCAmelCase = '''UNwant\u00E9d,running''' _UpperCAmelCase = '''unwanted, running''' return input_text, output_text def lowercase__ ( self : List[str] )->str: _UpperCAmelCase = self.tokenizer_class(self.vocab_file ) _UpperCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__UpperCamelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [7, 4, 5, 1_0, 8, 9] ) def lowercase__ ( self : Optional[Any] )->Tuple: _UpperCAmelCase = self.get_tokenizers(do_lower_case=__UpperCamelCase ) for tokenizer in tokenizers: _UpperCAmelCase = tokenizer('''UNwant\u00E9d,running''' ) _UpperCAmelCase = len(inputs['''input_ids'''] ) - 1 self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len ) _UpperCAmelCase = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' ) self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
"""simple docstring""" class _a : """simple docstring""" def __init__( self : Tuple , __UpperCamelCase : list[int] )->None: _UpperCAmelCase = len(__UpperCamelCase ) _UpperCAmelCase = [0] * len_array if len_array > 0: _UpperCAmelCase = array[0] for i in range(1 , __UpperCamelCase ): _UpperCAmelCase = self.prefix_sum[i - 1] + array[i] def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool: _UpperCAmelCase = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations __A : Union[str, Any] = list[list[int]] # assigning initial values to the grid __A : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __A : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def lowercase ( _SCREAMING_SNAKE_CASE : Matrix , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : Matrix ): '''simple docstring''' for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def lowercase ( _SCREAMING_SNAKE_CASE : Matrix ): '''simple docstring''' if location := find_empty_location(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase , _UpperCAmelCase = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = digit if sudoku(_SCREAMING_SNAKE_CASE ) is not None: return grid _UpperCAmelCase = 0 return None def lowercase ( _SCREAMING_SNAKE_CASE : Matrix ): '''simple docstring''' for row in grid: for cell in row: print(_SCREAMING_SNAKE_CASE , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("\nExample grid:\n" + "=" * 20) print_solution(example_grid) print("\nExample grid solution:") __A : Optional[int] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("Cannot find a solution.")
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A : Dict = logging.get_logger(__name__) __A : Any = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } __A : List[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for attribute in key.split('''.''' ): _UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if weight_type is not None: _UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape else: _UpperCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _UpperCAmelCase = value elif weight_type == "weight_g": _UpperCAmelCase = value elif weight_type == "weight_v": _UpperCAmelCase = value elif weight_type == "bias": _UpperCAmelCase = value elif weight_type == "running_mean": _UpperCAmelCase = value elif weight_type == "running_var": _UpperCAmelCase = value elif weight_type == "num_batches_tracked": _UpperCAmelCase = value elif weight_type == "inv_freq": _UpperCAmelCase = value else: _UpperCAmelCase = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = fairseq_model.state_dict() _UpperCAmelCase = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _UpperCAmelCase = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCAmelCase = True else: for key, mapped_key in MAPPING.items(): _UpperCAmelCase = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCAmelCase = True if "*" in mapped_key: _UpperCAmelCase = name.split(_SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2] _UpperCAmelCase = mapped_key.replace('''*''' , _SCREAMING_SNAKE_CASE ) if "pos_bias_u" in name: _UpperCAmelCase = None elif "pos_bias_v" in name: _UpperCAmelCase = None elif "weight_g" in name: _UpperCAmelCase = '''weight_g''' elif "weight_v" in name: _UpperCAmelCase = '''weight_v''' elif "bias" in name: _UpperCAmelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj _UpperCAmelCase = '''weight''' elif "running_mean" in name: _UpperCAmelCase = '''running_mean''' elif "inv_freq" in name: _UpperCAmelCase = '''inv_freq''' elif "running_var" in name: _UpperCAmelCase = '''running_var''' elif "num_batches_tracked" in name: _UpperCAmelCase = '''num_batches_tracked''' else: _UpperCAmelCase = None set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f'Unused weights: {unused_weights}' ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = full_name.split('''conv_layers.''' )[-1] _UpperCAmelCase = name.split('''.''' ) _UpperCAmelCase = int(items[0] ) _UpperCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Optional[Any]=True ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = WavaVecaConformerConfig.from_pretrained(_SCREAMING_SNAKE_CASE , hidden_act='''swish''' ) else: _UpperCAmelCase = WavaVecaConformerConfig() if "rope" in checkpoint_path: _UpperCAmelCase = '''rotary''' if is_finetuned: if dict_path: _UpperCAmelCase = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCAmelCase = target_dict.pad_index _UpperCAmelCase = target_dict.bos_index _UpperCAmelCase = target_dict.eos_index _UpperCAmelCase = len(target_dict.symbols ) _UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''vocab.json''' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = target_dict.indices # fairseq has the <pad> and <s> switched _UpperCAmelCase = 0 _UpperCAmelCase = 1 with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = True if config.feat_extract_norm == '''layer''' else False _UpperCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = WavaVecaConformerForCTC(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = WavaVecaConformerForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _UpperCAmelCase = argparse.Namespace(task='''audio_pretraining''' ) _UpperCAmelCase = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the 
output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __A : List[Any] = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
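# The weight-porting loop above hinges on substring-based key renaming. A
# self-contained sketch of that pattern (the mapping entries here are
# illustrative, taken from the table above, not the full MAPPING):
MAPPING_EXAMPLE = {
    "post_extract_proj": "feature_projection.projection",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
}


def rename_key(name: str) -> str:
    for old, new in MAPPING_EXAMPLE.items():
        if old in name:
            return name.replace(old, new)
    return name


assert rename_key("post_extract_proj.weight") == "feature_projection.projection.weight"
assert rename_key("unrelated.bias") == "unrelated.bias"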
"""simple docstring""" __A : Tuple = frozenset( [ "prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) __A : Union[str, Any] = frozenset(["prompt", "negative_prompt"]) __A : str = frozenset([]) __A : List[str] = frozenset(["image"]) __A : Optional[Any] = frozenset( [ "image", "height", "width", "guidance_scale", ] ) __A : Optional[int] = frozenset(["image"]) __A : Optional[int] = frozenset( [ "prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) __A : Optional[Any] = frozenset(["prompt", "image", "negative_prompt"]) __A : str = frozenset( [ # Text guided image variation with an image mask "prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) __A : Tuple = frozenset(["prompt", "image", "mask_image", "negative_prompt"]) __A : List[str] = frozenset( [ # image variation with an image mask "image", "mask_image", "height", "width", "guidance_scale", ] ) __A : List[Any] = frozenset(["image", "mask_image"]) __A : List[str] = frozenset( [ "example_image", "image", "mask_image", "height", "width", "guidance_scale", ] ) __A : Tuple = frozenset(["example_image", "image", "mask_image"]) __A : Dict = frozenset(["class_labels"]) __A : str = frozenset(["class_labels"]) __A : str = frozenset(["batch_size"]) __A : Union[str, Any] = frozenset([]) __A : str = frozenset(["batch_size"]) __A : Optional[int] = frozenset([]) __A : Any = frozenset( [ "prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) __A : List[str] = frozenset(["prompt", "negative_prompt"]) __A : Tuple = frozenset(["input_tokens"]) __A : Optional[int] = frozenset(["input_tokens"])
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' with open(_SCREAMING_SNAKE_CASE ) as metadata_file: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module'''] # Load the entity vocab file _UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE ) # add an entry for [MASK2] _UpperCAmelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''MLukeTokenizer''' with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0] _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0] _UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCAmelCase = state_dict[bias_name] _UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.' 
_UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCAmelCase = state_dict['''entity_predictions.bias'''] _UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) _UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): _UpperCAmelCase = state_dict[key] else: _UpperCAmelCase = state_dict[key] _UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}: raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' ) if set(_SCREAMING_SNAKE_CASE ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' ) _UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' _UpperCAmelCase = (0, 9) _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 33, 768) ) _UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 1, 768) ) _UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''Tokyo is the capital of <mask>.''' _UpperCAmelCase = (24, 30) _UpperCAmelCase = 
tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = encoding['''input_ids'''][0].tolist() _UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) _UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.entity_logits[0][0].argmax().item() _UpperCAmelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] _UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )] _UpperCAmelCase = {} for entry in data: _UpperCAmelCase = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCAmelCase = entity_id break _UpperCAmelCase = f'{language}:{entity_name}' _UpperCAmelCase = entity_id return new_mapping if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) __A : List[str] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
326
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : Optional[Any] = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from datetime import datetime import requests def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' _UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(_SCREAMING_SNAKE_CASE ).content if __name__ == "__main__": __A : str = input("Enter Video/IGTV url: ").strip() __A : str = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, "wb") as fp: fp.write(download_video(url)) print(f'''Done. Video saved to disk as {file_name}.''')
"""simple docstring""" import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __A : Union[str, Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __A : Tuple = importlib.util.spec_from_file_location( "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __A : List[str] = spec.loader.load_module() __A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)") __A : List[str] = { "CLIPConfigMixin", "DecisionTransformerConfigMixin", "EncoderDecoderConfigMixin", "RagConfigMixin", "SpeechEncoderDecoderConfigMixin", "VisionEncoderDecoderConfigMixin", "VisionTextDualEncoderConfigMixin", } def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [] for config_class in list(CONFIG_MAPPING.values() ): _UpperCAmelCase = False # source code of `config_class` _UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` _UpperCAmelCase , _UpperCAmelCase = checkpoint # verify the checkpoint name corresponds to the checkpoint link _UpperCAmelCase = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: _UpperCAmelCase = True break _UpperCAmelCase = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
326
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Dict )->Dict: _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = BlipImageProcessor() _UpperCAmelCase = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) _UpperCAmelCase = BlipaProcessor(__UpperCamelCase , __UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) def lowercase__ ( self : Any , **__UpperCamelCase : Dict )->Dict: return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).tokenizer def lowercase__ ( self : Tuple , **__UpperCamelCase : Any )->Optional[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).image_processor def lowercase__ ( self : Tuple )->Tuple: shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : List[str] )->Optional[Any]: _UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCAmelCase = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 ) _UpperCAmelCase = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCamelCase ) def lowercase__ ( self : Any )->Dict: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(__UpperCamelCase , return_tensors='''np''' ) _UpperCAmelCase = processor(images=__UpperCamelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase__ ( self : Any )->List[str]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = processor(text=__UpperCamelCase ) _UpperCAmelCase = tokenizer(__UpperCamelCase , return_token_type_ids=__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase__ ( self : Union[str, Any] )->List[Any]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__UpperCamelCase , 
image_processor=__UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(__UpperCamelCase ) _UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Dict )->Tuple: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BlipaProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) _UpperCAmelCase = '''lower newer''' _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
326
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if bit_count < 0: raise ValueError('''The given input must be positive''' ) # get the generated string sequence _UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE ) # # convert them to integers for i in range(len(_SCREAMING_SNAKE_CASE ) ): _UpperCAmelCase = int(sequence[i] , 2 ) return sequence def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] _UpperCAmelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits _UpperCAmelCase = gray_code_sequence_string(bit_count - 1 ) _UpperCAmelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): _UpperCAmelCase = '''0''' + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): _UpperCAmelCase = '''1''' + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
326
1
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int = 50 ): '''simple docstring''' _UpperCAmelCase = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
326
"""simple docstring""" import math def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ): '''simple docstring''' _UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = i _UpperCAmelCase = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _UpperCAmelCase = array[temp_index - 1] temp_index -= 1 _UpperCAmelCase = temp_index_value return array def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap '''simple docstring''' _UpperCAmelCase = index _UpperCAmelCase = 2 * index + 1 # Left Node _UpperCAmelCase = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _UpperCAmelCase = left_index if right_index < heap_size and array[largest] < array[right_index]: _UpperCAmelCase = right_index if largest != index: _UpperCAmelCase , _UpperCAmelCase = array[largest], array[index] heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) for i in range(n // 2 , -1 , -1 ): heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for i in range(n - 1 , 0 , -1 ): _UpperCAmelCase , _UpperCAmelCase = array[0], array[i] heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE ) return array def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = low _UpperCAmelCase = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _UpperCAmelCase , _UpperCAmelCase = array[j], array[i] i += 1 def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) == 0: return array _UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) ) _UpperCAmelCase = 16 return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(_SCREAMING_SNAKE_CASE ) max_depth -= 1 _UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 ) _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = p return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() __A : List[str] = input("Enter 
numbers separated by a comma : ").strip() __A : Optional[Any] = [float(item) for item in user_input.split(",")] print(sort(unsorted))
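# Design note: this is introsort -- quicksort with a median-of-three pivot (median_of_a,
# digits obfuscated) that falls back to heap_sort once recursion depth exceeds
# 2 * ceil(log2(n)), then finishes runs shorter than the size_threshold of 16 with
# insertion sort. Example, with the original top-level name sort() restored:
#   sort([5, 2, 9, 1, 7]) -> [1, 2, 5, 7, 9]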
326
1
"""simple docstring""" class _a ( lowerCAmelCase): """simple docstring""" pass class _a ( lowerCAmelCase): """simple docstring""" pass class _a : """simple docstring""" def __init__( self : Tuple )->Optional[int]: _UpperCAmelCase = [ [], [], [], ] def lowercase__ ( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int )->None: try: if len(self.queues[priority] ) >= 1_0_0: raise OverflowError('''Maximum queue size is 100''' ) self.queues[priority].append(__UpperCamelCase ) except IndexError: raise ValueError('''Valid priorities are 0, 1, and 2''' ) def lowercase__ ( self : List[Any] )->int: for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError('''All queues are empty''' ) def __str__( self : Optional[Any] )->str: return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) ) class _a : """simple docstring""" def __init__( self : Dict )->List[Any]: _UpperCAmelCase = [] def lowercase__ ( self : int , __UpperCamelCase : int )->None: if len(self.queue ) == 1_0_0: raise OverFlowError('''Maximum queue size is 100''' ) self.queue.append(__UpperCamelCase ) def lowercase__ ( self : str )->int: if not self.queue: raise UnderFlowError('''The queue is empty''' ) else: _UpperCAmelCase = min(self.queue ) self.queue.remove(__UpperCamelCase ) return data def __str__( self : str )->str: return str(self.queue ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(_SCREAMING_SNAKE_CASE ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(_SCREAMING_SNAKE_CASE ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(_SCREAMING_SNAKE_CASE ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(_SCREAMING_SNAKE_CASE ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
326
"""simple docstring""" from __future__ import annotations import numpy as np def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE ) if rows != columns: _UpperCAmelCase = ( '''\'table\' has to be of square shaped array but got a ''' f'{rows}x{columns} array:\n{table}' ) raise ValueError(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros((rows, columns) ) _UpperCAmelCase = np.zeros((rows, columns) ) for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) ) if upper[j][j] == 0: raise ArithmeticError('''No LU decomposition exists''' ) _UpperCAmelCase = (table[i][j] - total) / upper[j][j] _UpperCAmelCase = 1 for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
326
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A : Dict = { "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = [ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = CTRLTokenizer UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Dict )->str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] _UpperCAmelCase = {'''unk_token''': '''<unk>'''} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCamelCase ) ) def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = '''adapt react readapt apt''' _UpperCAmelCase = '''adapt react readapt apt''' return input_text, output_text def lowercase__ ( self : Dict )->Optional[int]: _UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCAmelCase = '''adapt react readapt apt''' _UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
326
1
"""simple docstring""" from __future__ import annotations from scipy.special import comb # type: ignore class _a : """simple docstring""" def __init__( self : Union[str, Any] , __UpperCamelCase : list[tuple[float, float]] )->str: _UpperCAmelCase = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. _UpperCAmelCase = len(__UpperCamelCase ) - 1 def lowercase__ ( self : str , __UpperCamelCase : float )->list[float]: assert 0 <= t <= 1, "Time t must be between 0 and 1." _UpperCAmelCase = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , __UpperCamelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(__UpperCamelCase ) , 5 ) == 1 return output_values def lowercase__ ( self : List[str] , __UpperCamelCase : float )->tuple[float, float]: assert 0 <= t <= 1, "Time t must be between 0 and 1." _UpperCAmelCase = self.basis_function(__UpperCamelCase ) _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def lowercase__ ( self : int , __UpperCamelCase : float = 0.0_1 )->Any: from matplotlib import pyplot as plt # type: ignore _UpperCAmelCase = [] # x coordinates of points to plot _UpperCAmelCase = [] # y coordinates of points to plot _UpperCAmelCase = 0.0 while t <= 1: _UpperCAmelCase = self.bezier_curve_function(__UpperCamelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size _UpperCAmelCase = [i[0] for i in self.list_of_points] _UpperCAmelCase = [i[1] for i in self.list_of_points] plt.plot( __UpperCamelCase , __UpperCamelCase , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , ) plt.scatter(__UpperCamelCase , __UpperCamelCase , color='''red''' , label='''Control Points''' ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
326
"""simple docstring""" import logging import os from .state import PartialState class _a ( logging.LoggerAdapter): """simple docstring""" @staticmethod def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int: if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) _UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase ) _UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase ) if self.isEnabledFor(__UpperCamelCase ): if self._should_log(__UpperCamelCase ): _UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) elif in_order: _UpperCAmelCase = PartialState() for i in range(state.num_processes ): if i == state.process_index: _UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) state.wait_for_everyone() def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ): '''simple docstring''' if log_level is None: _UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
326
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : str )->Tuple: _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = SamImageProcessor() _UpperCAmelCase = SamProcessor(__UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) def lowercase__ ( self : List[str] , **__UpperCamelCase : str )->Union[str, Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).image_processor def lowercase__ ( self : List[Any] )->List[Any]: shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : Optional[int] )->Dict: _UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase__ ( self : int )->Tuple: _UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 ) _UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCamelCase ) def lowercase__ ( self : Any )->List[str]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = SamProcessor(image_processor=__UpperCamelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(__UpperCamelCase , return_tensors='''np''' ) _UpperCAmelCase = processor(images=__UpperCamelCase , return_tensors='''np''' ) input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def lowercase__ ( self : Dict )->Union[str, Any]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = SamProcessor(image_processor=__UpperCamelCase ) _UpperCAmelCase = [torch.ones((1, 3, 5, 5) )] _UpperCAmelCase = [[1_7_6_4, 2_6_4_6]] _UpperCAmelCase = [[6_8_3, 1_0_2_4]] _UpperCAmelCase = processor.post_process_masks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) _UpperCAmelCase = processor.post_process_masks( __UpperCamelCase , torch.tensor(__UpperCamelCase ) , torch.tensor(__UpperCamelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) # should also work with np _UpperCAmelCase = [np.ones((1, 3, 5, 5) )] _UpperCAmelCase = processor.post_process_masks(__UpperCamelCase , np.array(__UpperCamelCase ) , np.array(__UpperCamelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) _UpperCAmelCase = [[1, 0], [0, 1]] with self.assertRaises(__UpperCamelCase 
): _UpperCAmelCase = processor.post_process_masks(__UpperCamelCase , np.array(__UpperCamelCase ) , np.array(__UpperCamelCase ) ) @require_vision @require_tf class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Union[str, Any] )->str: _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = SamImageProcessor() _UpperCAmelCase = SamProcessor(__UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) def lowercase__ ( self : Any , **__UpperCamelCase : Optional[int] )->Tuple: return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).image_processor def lowercase__ ( self : List[Any] )->Any: shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : Tuple )->Tuple: _UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase__ ( self : Optional[Any] )->Optional[Any]: _UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 ) _UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCamelCase ) def lowercase__ ( self : Optional[int] )->Dict: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = SamProcessor(image_processor=__UpperCamelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(__UpperCamelCase , return_tensors='''np''' ) _UpperCAmelCase = processor(images=__UpperCamelCase , return_tensors='''np''' ) input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def lowercase__ ( self : Union[str, Any] )->List[Any]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = SamProcessor(image_processor=__UpperCamelCase ) _UpperCAmelCase = [tf.ones((1, 3, 5, 5) )] _UpperCAmelCase = [[1_7_6_4, 2_6_4_6]] _UpperCAmelCase = [[6_8_3, 1_0_2_4]] _UpperCAmelCase = processor.post_process_masks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , return_tensors='''tf''' ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) _UpperCAmelCase = processor.post_process_masks( __UpperCamelCase , tf.convert_to_tensor(__UpperCamelCase ) , tf.convert_to_tensor(__UpperCamelCase ) , return_tensors='''tf''' , ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) # should also work with np _UpperCAmelCase = [np.ones((1, 3, 5, 5) )] _UpperCAmelCase = processor.post_process_masks( __UpperCamelCase , np.array(__UpperCamelCase ) , np.array(__UpperCamelCase ) , return_tensors='''tf''' ) self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) ) _UpperCAmelCase = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): _UpperCAmelCase = processor.post_process_masks( __UpperCamelCase , np.array(__UpperCamelCase ) , np.array(__UpperCamelCase ) , return_tensors='''tf''' ) @require_vision @require_torchvision class _a ( unittest.TestCase): """simple docstring""" def 
lowercase__ ( self : List[str] )->Union[str, Any]: _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = SamImageProcessor() _UpperCAmelCase = SamProcessor(__UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) def lowercase__ ( self : Optional[Any] , **__UpperCamelCase : Any )->Optional[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ).image_processor def lowercase__ ( self : Dict )->Any: shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def lowercase__ ( self : Optional[int] )->str: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = SamProcessor(image_processor=__UpperCamelCase ) _UpperCAmelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) _UpperCAmelCase = [tf.convert_to_tensor(__UpperCamelCase )] _UpperCAmelCase = [torch.tensor(__UpperCamelCase )] _UpperCAmelCase = [[1_7_6_4, 2_6_4_6]] _UpperCAmelCase = [[6_8_3, 1_0_2_4]] _UpperCAmelCase = processor.post_process_masks( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , return_tensors='''tf''' ) _UpperCAmelCase = processor.post_process_masks( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , return_tensors='''pt''' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def lowercase__ ( self : Optional[int] )->Optional[Any]: _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = SamProcessor(image_processor=__UpperCamelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(__UpperCamelCase , return_tensors='''pt''' )['''pixel_values'''].numpy() _UpperCAmelCase = processor(images=__UpperCamelCase , return_tensors='''pt''' )['''pixel_values'''].numpy() _UpperCAmelCase = image_processor(__UpperCamelCase , return_tensors='''tf''' )['''pixel_values'''].numpy() _UpperCAmelCase = processor(images=__UpperCamelCase , return_tensors='''tf''' )['''pixel_values'''].numpy() self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
326
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __A : List[Any] = logging.get_logger(__name__) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = ["""pixel_values"""] def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6} _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _UpperCAmelCase = get_size_dict(__UpperCamelCase ) _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray: _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}' ) _UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray: _UpperCAmelCase = get_size_dict(__UpperCamelCase ) return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray: return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray: return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]: _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(__UpperCamelCase ) _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] _UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
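# Pipeline order implemented by the preprocess method above: to NumPy -> resize
# (shortest edge to 256) -> center crop (224x224) -> rescale (1/255) -> normalize
# (ImageNet mean/std) -> channel-first layout, with every step individually
# switchable through its do_* flag.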
326
1
"""simple docstring""" import csv import tweepy # Twitter API credentials __A : Optional[int] = "" __A : Union[str, Any] = "" __A : Any = "" __A : List[str] = "" def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = tweepy.OAuthHandler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) auth.set_access_token(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tweepy.API(_SCREAMING_SNAKE_CASE ) # initialize a list to hold all the tweepy Tweets _UpperCAmelCase = [] # make initial request for most recent tweets (200 is the maximum allowed count) _UpperCAmelCase = api.user_timeline(screen_name=_SCREAMING_SNAKE_CASE , count=200 ) # save most recent tweets alltweets.extend(_SCREAMING_SNAKE_CASE ) # save the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(_SCREAMING_SNAKE_CASE ) > 0: print(f'getting tweets before {oldest}' ) # all subsequent requests use the max_id param to prevent duplicates _UpperCAmelCase = api.user_timeline( screen_name=_SCREAMING_SNAKE_CASE , count=200 , max_id=_SCREAMING_SNAKE_CASE ) # save most recent tweets alltweets.extend(_SCREAMING_SNAKE_CASE ) # update the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 print(f'...{len(_SCREAMING_SNAKE_CASE )} tweets downloaded so far' ) # transform the tweepy tweets into a 2D array that will populate the csv _UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'new_{screen_name}_tweets.csv' , '''w''' ) as f: _UpperCAmelCase = csv.writer(_SCREAMING_SNAKE_CASE ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("FirePing32")
326
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __A : List[Any] = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
1
"""simple docstring""" import functools def lowercase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ): '''simple docstring''' if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for day in days ): raise ValueError('''The parameter days should be a list of integers''' ) if len(_SCREAMING_SNAKE_CASE ) != 3 or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for cost in costs ): raise ValueError('''The parameter costs should be a list of three integers''' ) if len(_SCREAMING_SNAKE_CASE ) == 0: return 0 if min(_SCREAMING_SNAKE_CASE ) <= 0: raise ValueError('''All days elements should be greater than 0''' ) if max(_SCREAMING_SNAKE_CASE ) >= 366: raise ValueError('''All days elements should be less than 366''' ) _UpperCAmelCase = set(_SCREAMING_SNAKE_CASE ) @functools.cache def dynamic_programming(_SCREAMING_SNAKE_CASE : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
326
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = None UpperCamelCase__ = None __A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess") def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ): raise ValueError('''The nodes number should be same as the number of coins''' ) # Main calculation def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) _UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left ) _UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right ) _UpperCAmelCase = 1 - left_distrib_excess _UpperCAmelCase = 1 - right_distrib_excess _UpperCAmelCase = ( left_distrib_moves + right_distrib_moves + abs(_SCREAMING_SNAKE_CASE ) + abs(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return get_distrib(_SCREAMING_SNAKE_CASE )[0] if __name__ == "__main__": import doctest doctest.testmod()
326
1
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' ) parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 ) parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 ) parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 ) parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE ) parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 ) parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 ) parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' ) parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 ) parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 ) parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' ) return parser.parse_args() __A : Union[str, Any] = load("accuracy") def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = eval_pred _UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE ) class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None: super().__init__() _UpperCAmelCase = trainer def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any: if control.should_evaluate: _UpperCAmelCase = deepcopy(__UpperCamelCase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' ) return control_copy def lowercase ( ): '''simple docstring''' _UpperCAmelCase = get_args() set_seed(args.seed ) _UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' ) _UpperCAmelCase = dataset.train_test_split(test_size=0.2 ) _UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 ) _UpperCAmelCase = DatasetDict( { '''train''': train_test['''train'''], '''test''': test_validation['''train'''], '''valid''': test_validation['''test'''], } ) print('''Loading tokenizer and model''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) _UpperCAmelCase = tokenizer.eos_token _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) _UpperCAmelCase = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _UpperCAmelCase = False _UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) ) def tokenize(_SCREAMING_SNAKE_CASE : Any ): _UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 ) _UpperCAmelCase = labels.straint(example['''complexity'''] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], 
"label": label, } _UpperCAmelCase = train_test_validation.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , ) _UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , ) _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , ) print('''Training...''' ) trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) ) trainer.train() if __name__ == "__main__": main()
326
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]: _UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = 
type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple: _UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]: _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : 
Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]: _UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]: _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( 
self : List[str] )->Optional[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def lowercase__ ( self : List[Any] )->str: _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 ) def lowercase__ ( self : List[Any] )->List[str]: self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase ) def lowercase__ ( self : Any )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase ) def lowercase__ ( self : str )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase ) def lowercase__ ( self : Any )->List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase ) def lowercase__ ( self : Dict )->Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase ) def lowercase__ ( self : Any )->Optional[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase ) def lowercase__ ( self : List[str] )->Tuple: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase ) @slow def lowercase__ ( self : Tuple )->List[str]: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_tf class _a ( unittest.TestCase): """simple docstring""" @slow def lowercase__ ( self : str )->Dict: _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__UpperCamelCase )[0] _UpperCAmelCase = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , __UpperCamelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6], [-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7], [-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
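# Illustrative sketch: the multiple-choice tester above feeds the model by
# repeating each example `num_choices` times via expand_dims + tile. The same
# reshaping in plain numpy (names and sizes below are made up for the example):
import numpy as np

batch, seq_len, num_choices = 2, 5, 4
input_ids = np.arange(batch * seq_len).reshape(batch, seq_len)

# (batch, seq) -> (batch, 1, seq) -> (batch, num_choices, seq)
tiled = np.tile(input_ids[:, None, :], (1, num_choices, 1))
assert tiled.shape == (batch, num_choices, seq_len)
assert (tiled[:, 0, :] == input_ids).all()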
326
1
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva __A : Optional[Any] = "" __A : Tuple = "" __A : Union[str, Any] = "" __A : List[str] = 1 # (0 is vertical, 1 is horizontal) def lowercase ( ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = get_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print('''Processing...''' ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = update_image_and_anno(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for index, image in enumerate(_SCREAMING_SNAKE_CASE ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _UpperCAmelCase = random_chars(32 ) _UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] _UpperCAmelCase = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}' cva.imwrite(f'/{file_root}.jpg' , _SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Success {index+1}/{len(_SCREAMING_SNAKE_CASE )} with {file_name}' ) _UpperCAmelCase = [] for anno in new_annos[index]: _UpperCAmelCase = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}' annos_list.append(_SCREAMING_SNAKE_CASE ) with open(f'/{file_root}.txt' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = [] for label_file in glob.glob(os.path.join(_SCREAMING_SNAKE_CASE , '''*.txt''' ) ): _UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(_SCREAMING_SNAKE_CASE ) as in_file: _UpperCAmelCase = in_file.readlines() _UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , f'{label_name}.jpg' ) _UpperCAmelCase = [] for obj_list in obj_lists: _UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(_SCREAMING_SNAKE_CASE ) labels.append(_SCREAMING_SNAKE_CASE ) return img_paths, labels def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 1 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] for idx in range(len(_SCREAMING_SNAKE_CASE ) ): _UpperCAmelCase = [] _UpperCAmelCase = img_list[idx] path_list.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = anno_list[idx] _UpperCAmelCase = cva.imread(_SCREAMING_SNAKE_CASE ) if flip_type == 1: _UpperCAmelCase = cva.flip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for bbox in img_annos: _UpperCAmelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _UpperCAmelCase = cva.flip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for bbox in img_annos: _UpperCAmelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(_SCREAMING_SNAKE_CASE ) new_imgs_list.append(_SCREAMING_SNAKE_CASE ) return new_imgs_list, new_annos_lists, path_list def lowercase ( _SCREAMING_SNAKE_CASE : int = 32 ): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" _UpperCAmelCase = ascii_lowercase + digits return "".join(random.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": main() print("DONE ✅")
326
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(_SCREAMING_SNAKE_CASE ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
326
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Dict = logging.get_logger(__name__) __A : int = "▁" __A : int = {"vocab_file": "sentencepiece.bpe.model"} __A : Any = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", } } __A : List[str] = { "facebook/xglm-564M": 2048, } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = ["""input_ids""", """attention_mask"""] def __init__( self : int , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="<s>" , __UpperCamelCase : List[Any]="</s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="<s>" , __UpperCamelCase : str="<unk>" , __UpperCamelCase : int="<pad>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : List[Any] , )->None: _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer _UpperCAmelCase = 7 _UpperCAmelCase = [F'<madeupword{i}>' for i in range(self.num_madeup_words )] _UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) _UpperCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCAmelCase = 1 # Mimic fairseq token-to-id alignment for the first 4 token _UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} _UpperCAmelCase = len(self.sp_model ) _UpperCAmelCase = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__UpperCamelCase ) _UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : List[Any] )->List[str]: _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None _UpperCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] , __UpperCamelCase : str )->Union[str, Any]: _UpperCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase__ ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None )->List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a _UpperCAmelCase = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def lowercase__ ( self : List[str] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False )->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None )->List[int]: _UpperCAmelCase = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def lowercase__ ( self : Any )->Tuple: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def lowercase__ ( self : List[str] )->Any: _UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase__ ( self : int , __UpperCamelCase : str )->List[str]: return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] )->List[str]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCAmelCase = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase__ ( self : List[str] , __UpperCamelCase : Dict )->int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase__ ( self : List[str] , __UpperCamelCase : int )->Tuple: _UpperCAmelCase = ''''''.join(__UpperCamelCase ).replace(__UpperCamelCase , ''' ''' ).strip() return out_string def lowercase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None )->Tuple[str]: if not os.path.isdir(__UpperCamelCase ): logger.error(F'Vocabulary path 
({save_directory}) should be a directory' ) return _UpperCAmelCase = os.path.join( __UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , '''wb''' ) as fi: _UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
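# Illustrative sketch: the token-to-id bookkeeping above reduces to a fixed
# offset between SentencePiece ids and the fairseq-style vocabulary, plus an
# override table for special tokens. The same mapping without sentencepiece
# (`spm_lookup` is a hypothetical stand-in for sp_model.PieceToId, which
# returns 0 for unknown pieces):
FAIRSEQ_OFFSET = 1
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}


def token_to_id(token: str, spm_lookup) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:  # special tokens bypass spm entirely
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = spm_lookup(token)
    # shift real pieces by the offset; map spm's "unknown" (0) to <unk>
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]


assert token_to_id("<pad>", lambda t: 0) == 1
assert token_to_id("▁hello", lambda t: 41) == 42  # hypothetical piece id
assert token_to_id("junk", lambda t: 0) == 3      # falls back to <unk>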
326
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __A : Tuple = logging.getLogger() @unittest.skip("""Temporarily disable the doc tests.""") @require_torch @require_tf @slow class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Tuple: _UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )] if identifier is not None: _UpperCAmelCase = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__UpperCamelCase , __UpperCamelCase ): for n_ in n_identifier: _UpperCAmelCase = [file for file in files if n_ not in file] else: _UpperCAmelCase = [file for file in files if n_identifier not in file] _UpperCAmelCase = ignore_files or [] ignore_files.append('''__init__.py''' ) _UpperCAmelCase = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , __UpperCamelCase ) if only_modules: _UpperCAmelCase = file.split('''.''' )[0] try: _UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase ) _UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'{module_identifier} is not a module.' ) else: _UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowercase__ ( self : str )->int: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''modeling''' _UpperCAmelCase = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''tokenization''' self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase ) def lowercase__ ( self : str )->Any: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = '''configuration''' self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase ) def lowercase__ ( self : int )->Optional[Any]: _UpperCAmelCase = Path('''src/transformers''' ) _UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = Path('''docs/source''' ) _UpperCAmelCase = ['''favicon.ico'''] self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
326
1
"""simple docstring""" import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = VideoToVideoSDPipeline UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""}) - {"""image""", """width""", """height"""} UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""}) - {"""image"""} UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""} UpperCamelCase__ = False # No `output_type`. UpperCamelCase__ = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ]) def lowercase__ ( self : Optional[Any] )->Optional[int]: torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=3_2 , attention_head_dim=4 , ) _UpperCAmelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , ) torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _UpperCAmelCase = CLIPTextModel(__UpperCamelCase ) _UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def lowercase__ ( self : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : int=0 )->List[str]: # 3 frames _UpperCAmelCase = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) if str(__UpperCamelCase ).startswith('''mps''' ): _UpperCAmelCase = torch.manual_seed(__UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''video''': video, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def lowercase__ ( self : Union[str, Any] )->Dict: 
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = VideoToVideoSDPipeline(**__UpperCamelCase ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCAmelCase = '''np''' _UpperCAmelCase = sd_pipe(**__UpperCamelCase ).frames _UpperCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (3_2, 3_2, 3) _UpperCAmelCase = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def lowercase__ ( self : Optional[Any] )->int: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCamelCase , expected_max_diff=5e-3 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def lowercase__ ( self : Optional[int] )->Optional[Any]: pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def lowercase__ ( self : Union[str, Any] )->List[str]: pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def lowercase__ ( self : List[str] )->Optional[int]: pass def lowercase__ ( self : int )->List[Any]: return super().test_progress_bar() @slow @skip_mps class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Any )->int: _UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames _UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) _UpperCAmelCase = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=__UpperCamelCase ) _UpperCAmelCase = video.to('''cuda''' ) _UpperCAmelCase = '''Spiderman is surfing''' _UpperCAmelCase = pipe(__UpperCamelCase , video=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=3 , output_type='''pt''' ).frames _UpperCAmelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
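# Illustrative sketch: the numeric assertions in these pipeline tests follow
# one pattern - compare a small deterministic slice of the output against
# hard-coded reference values within a tolerance. A generic helper capturing
# that pattern (not the project's actual API):
import numpy as np


def assert_close_slice(output, expected_slice, atol=1e-2):
    # only a tiny corner of the tensor is checked; enough to catch regressions
    # without storing full reference outputs
    actual = np.asarray(output)[-3:, -3:].flatten()
    assert np.abs(actual - np.asarray(expected_slice).flatten()).max() < atol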
326
"""simple docstring""" # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = None def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ): '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ): return math.exp(t * -12.0 ) else: raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' ) _UpperCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = i / num_diffusion_timesteps _UpperCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class _a ( lowerCAmelCase , lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 1 @register_to_config def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict: if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None: _UpperCAmelCase = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase ) _UpperCAmelCase = kwargs['''set_alpha_to_one'''] if trained_betas is not None: _UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": _UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
_UpperCAmelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) _UpperCAmelCase = 1.0 - self.betas _UpperCAmelCase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. _UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution _UpperCAmelCase = 1.0 # setable values _UpperCAmelCase = None _UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) ) def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor: return sample def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:' F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle' F' maximal {self.config.num_train_timesteps} timesteps.' ) _UpperCAmelCase = num_inference_steps _UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa ) _UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase ) self.timesteps += self.config.steps_offset def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]: # 1. get previous step value (=t+1) _UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process _UpperCAmelCase = self.alphas_cumprod[timestep] _UpperCAmelCase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) _UpperCAmelCase = 1 - alpha_prod_t # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": _UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 _UpperCAmelCase = model_output elif self.config.prediction_type == "sample": _UpperCAmelCase = model_output _UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": _UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output _UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: _UpperCAmelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase ) def __len__( self : Any )->str: return self.config.num_train_timesteps
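# Illustrative sketch: the three `prediction_type` branches above are
# algebraic rearrangements of the forward relation
#   sample = sqrt(a) * x0 + sqrt(1 - a) * eps,  with a = alpha_prod_t.
# A scalar check that the "epsilon" and "v_prediction" conversions agree:
import math

a = 0.7
x0, eps = 0.3, -1.2
sample = math.sqrt(a) * x0 + math.sqrt(1 - a) * eps
b = 1 - a  # beta_prod_t

x0_from_eps = (sample - math.sqrt(b) * eps) / math.sqrt(a)
v = math.sqrt(a) * eps - math.sqrt(b) * x0           # definition of v
x0_from_v = math.sqrt(a) * sample - math.sqrt(b) * v
eps_from_v = math.sqrt(a) * v + math.sqrt(b) * sample

assert abs(x0_from_eps - x0) < 1e-9
assert abs(x0_from_v - x0) < 1e-9
assert abs(eps_from_v - eps) < 1e-9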
326
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A : str = { "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = ["MobileViTFeatureExtractor"] __A : List[str] = ["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
"""simple docstring""" from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = int(number**0.5 ) return number == sq * sq def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _UpperCAmelCase = x_den * y_den * z_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) top //= hcf bottom //= hcf return top, bottom def lowercase ( _SCREAMING_SNAKE_CASE : int = 35 ): '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = 42 _UpperCAmelCase = Fraction(0 ) _UpperCAmelCase = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 _UpperCAmelCase = x_num * y_den + x_den * y_num _UpperCAmelCase = x_den * y_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 _UpperCAmelCase = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _UpperCAmelCase = x_den * x_den * y_den * y_den if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=-1 _UpperCAmelCase = x_num * y_num _UpperCAmelCase = x_den * y_num + x_num * y_den _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) # n=2 _UpperCAmelCase = x_num * x_num * y_num * y_num _UpperCAmelCase = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _UpperCAmelCase = add_three( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) unique_s.add(_SCREAMING_SNAKE_CASE ) for num, den in unique_s: total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return total.denominator + total.numerator if __name__ == "__main__": print(f'''{solution() = }''')
326
1
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _a ( unittest.TestCase): """simple docstring""" UpperCamelCase__ = JukeboxTokenizer UpperCamelCase__ = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def lowercase__ ( self : str )->Tuple: import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' ) _UpperCAmelCase = tokenizer(**self.metas )['''input_ids'''] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7, 7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2, 4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5, 3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6, 2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5, 4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6, 4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3, 7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8, 2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4, 4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1, 3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6, 4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9, 3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1, 7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4, 4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5, 
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9, 4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6, 4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3, 7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6, 4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4, 7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7, 3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8, 2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0, 7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5, 7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4, 7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6]] ), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def lowercase__ ( self : List[Any] )->int: import torch _UpperCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' ) _UpperCAmelCase = tokenizer(**self.metas )['''input_ids'''] # fmt: off _UpperCAmelCase = [ torch.tensor([[ 0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9, 3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7, 4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1, 7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8, 2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1, 3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7, 4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7, 7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5, 6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1, 4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7, 3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1, 3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9, 4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5, 3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 
4_4, 3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7, 3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2, 3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2, 3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7, 1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7, 1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2, 4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7, 4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1, 7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5, 2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
326
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' with open(_SCREAMING_SNAKE_CASE ) as metadata_file: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module'''] # Load the entity vocab file _UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE ) # add an entry for [MASK2] _UpperCAmelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''MLukeTokenizer''' with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0] _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0] _UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCAmelCase = state_dict[bias_name] _UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.' 
_UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCAmelCase = state_dict['''entity_predictions.bias'''] _UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) _UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): _UpperCAmelCase = state_dict[key] else: _UpperCAmelCase = state_dict[key] _UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}: raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' ) if set(_SCREAMING_SNAKE_CASE ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' ) _UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' _UpperCAmelCase = (0, 9) _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 33, 768) ) _UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 1, 768) ) _UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''Tokyo is the capital of <mask>.''' _UpperCAmelCase = (24, 30) _UpperCAmelCase = 
tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = encoding['''input_ids'''][0].tolist() _UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) _UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.entity_logits[0][0].argmax().item() _UpperCAmelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] _UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )] _UpperCAmelCase = {} for entry in data: _UpperCAmelCase = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCAmelCase = entity_id break _UpperCAmelCase = f'{language}:{entity_name}' _UpperCAmelCase = entity_id return new_mapping if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) __A : List[str] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
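# Illustrative sketch: the conversion script above grows several embedding
# matrices for newly added special tokens; each extension is a row-wise
# torch.cat, initialising the new rows from existing anchor tokens. Shapes and
# indices below are made up:
import torch

vocab_size, hidden = 8, 4
word_emb = torch.randn(vocab_size, hidden)

ent_init_index, enta_init_index = 3, 5
ent_row = word_emb[ent_init_index].unsqueeze(0)
enta_row = word_emb[enta_init_index].unsqueeze(0)

extended = torch.cat([word_emb, ent_row, enta_row])
assert extended.shape == (vocab_size + 2, hidden)
assert torch.equal(extended[vocab_size], word_emb[ent_init_index])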
326
1
"""simple docstring""" import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict )->Tuple: self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) for a, b in zip(__UpperCamelCase , __UpperCamelCase ): self.assertAlmostEqual(__UpperCamelCase , __UpperCamelCase , delta=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: _UpperCAmelCase = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__UpperCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def lowercase__ ( self : Optional[int] )->Tuple: _UpperCAmelCase = None ops.enable_eager_execution_internal() _UpperCAmelCase = tf.config.list_physical_devices('''CPU''' ) if len(__UpperCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) _UpperCAmelCase = tf.config.list_logical_devices(device_type='''CPU''' ) _UpperCAmelCase = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): _UpperCAmelCase = GradientAccumulator() _UpperCAmelCase = tf.Variable([4.0, 3.0] ) _UpperCAmelCase , _UpperCAmelCase = create_optimizer(5e-5 , 1_0 , 5 ) _UpperCAmelCase = tf.Variable([0.0, 0.0] , trainable=__UpperCamelCase ) def accumulate_on_replica(__UpperCamelCase : Tuple ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__UpperCamelCase : Dict , __UpperCamelCase : List[str] ): with strategy.scope(): _UpperCAmelCase = strategy.experimental_local_results(__UpperCamelCase ) local_variables[0].assign(__UpperCamelCase ) local_variables[1].assign(__UpperCamelCase ) strategy.run(__UpperCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__UpperCamelCase ) def _check_local_values(__UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ): _UpperCAmelCase = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __UpperCamelCase , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __UpperCamelCase , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
326
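The test above exercises the accumulate-then-step contract of transformers' TF GradientAccumulator: gradients from several micro-batches are summed until an optimizer step is applied, and reset() zeroes the buffer. A framework-free sketch of that contract; the class name and structure here are illustrative, not the library's:

class TinyAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        # Lazily size the buffer on the first call, then enforce a fixed shape.
        if self.gradients is None:
            self.gradients = [0.0] * len(grads)
        if len(grads) != len(self.gradients):
            raise ValueError("inconsistent number of gradients")
        self.step += 1
        self.gradients = [acc + g for acc, g in zip(self.gradients, grads)]

    def reset(self):
        self.step = 0
        if self.gradients is not None:
            self.gradients = [0.0] * len(self.gradients)

acc = TinyAccumulator()
acc([1.0, 2.0])
acc([-2.0, 1.0])
acc([-1.0, 2.0])
assert acc.step == 3 and acc.gradients == [-2.0, 5.0]  # the same totals the test expects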
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu __A : Tuple = [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' _UpperCAmelCase = True while ask_again: _UpperCAmelCase = input(_SCREAMING_SNAKE_CASE ) try: if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0: return default return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result except Exception: if error_message is not None: print(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ): '''simple docstring''' _UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE ) return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] ) def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class _a ( argparse.RawDescriptionHelpFormatter): """simple docstring""" def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]: _UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' ) return usage
326
1
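The `_ask_field`-style helpers in the record above pair a prompt with an optional converter and re-ask on failure, while the `_convert_*` functions map a 0-based menu index onto an enum member. The index-to-enum step can be sketched without the accelerate internals; the enum below is a stand-in, not the real PrecisionType:

from enum import Enum

class Precision(Enum):
    NO = "no"
    FP16 = "fp16"
    BF16 = "bf16"

def convert_precision(value):
    # The bullet menu returns a 0-based index; look it up positionally.
    return Precision(["no", "fp16", "bf16"][int(value)])

assert convert_precision("1") is Precision.FP16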
"""simple docstring""" from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image __A : Optional[Any] = ["text", "image", "audio"] def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' _UpperCAmelCase = [] for input_type in input_types: if input_type == "text": inputs.append('''Text input''' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) ) elif input_type == "audio": inputs.append(torch.ones(3000 ) ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): inputs.append(create_inputs(_SCREAMING_SNAKE_CASE ) ) else: raise ValueError(f'Invalid type requested: {input_type}' ) return inputs def lowercase ( _SCREAMING_SNAKE_CASE : List ): '''simple docstring''' _UpperCAmelCase = [] for output in outputs: if isinstance(_SCREAMING_SNAKE_CASE , (str, AgentText) ): output_types.append('''text''' ) elif isinstance(_SCREAMING_SNAKE_CASE , (Image.Image, AgentImage) ): output_types.append('''image''' ) elif isinstance(_SCREAMING_SNAKE_CASE , (torch.Tensor, AgentAudio) ): output_types.append('''audio''' ) else: raise ValueError(f'Invalid output: {output}' ) return output_types @is_tool_test class _a : """simple docstring""" def lowercase__ ( self : Union[str, Any] )->Optional[Any]: self.assertTrue(hasattr(self.tool , '''inputs''' ) ) self.assertTrue(hasattr(self.tool , '''outputs''' ) ) _UpperCAmelCase = self.tool.inputs for _input in inputs: if isinstance(_input , __UpperCamelCase ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) _UpperCAmelCase = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = create_inputs(self.tool.inputs ) _UpperCAmelCase = self.tool(*__UpperCamelCase ) # There is a single output if len(self.tool.outputs ) == 1: _UpperCAmelCase = [outputs] self.assertListEqual(output_types(__UpperCamelCase ) , self.tool.outputs ) def lowercase__ ( self : Optional[int] )->Tuple: self.assertTrue(hasattr(self.tool , '''description''' ) ) self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) ) self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) ) def lowercase__ ( self : Tuple )->int: _UpperCAmelCase = create_inputs(self.tool.inputs ) _UpperCAmelCase = self.tool(*__UpperCamelCase ) if not isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = [outputs] self.assertEqual(len(__UpperCamelCase ) , len(self.tool.outputs ) ) for output, output_type in zip(__UpperCamelCase , self.tool.outputs ): _UpperCAmelCase = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) ) def lowercase__ ( self : List[Any] )->List[Any]: _UpperCAmelCase = create_inputs(self.tool.inputs ) _UpperCAmelCase = [] for _input, input_type in zip(__UpperCamelCase , self.tool.inputs ): if isinstance(__UpperCamelCase , __UpperCamelCase ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error _UpperCAmelCase = 
self.tool(*__UpperCamelCase ) if not isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = [outputs] self.assertEqual(len(__UpperCamelCase ) , len(self.tool.outputs ) )
326
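The `output_types` helper above classifies tool outputs with a chain of isinstance checks so the test can compare them against the declared `tool.outputs`. The dispatch pattern in isolation, with plain built-in types standing in for the agent classes:

def output_types(outputs):
    types = []
    for output in outputs:
        if isinstance(output, str):
            types.append("text")
        elif isinstance(output, (bytes, bytearray)):
            types.append("image")  # illustrative stand-in for Image.Image / AgentImage
        else:
            raise ValueError(f"Invalid output: {output!r}")
    return types

assert output_types(["hello", b"\x89PNG"]) == ["text", "image"]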
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' ) parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 ) parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 ) parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 ) parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE ) parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 ) parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 ) parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' ) parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 ) parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 ) parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' ) return parser.parse_args() __A : Union[str, Any] = load("accuracy") def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = eval_pred _UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE ) class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None: super().__init__() _UpperCAmelCase = trainer def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any: if control.should_evaluate: _UpperCAmelCase = deepcopy(__UpperCamelCase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' ) return control_copy def lowercase ( ): '''simple docstring''' _UpperCAmelCase = get_args() set_seed(args.seed ) _UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' ) _UpperCAmelCase = dataset.train_test_split(test_size=0.2 ) _UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 ) _UpperCAmelCase = DatasetDict( { '''train''': train_test['''train'''], '''test''': test_validation['''train'''], '''valid''': test_validation['''test'''], } ) print('''Loading tokenizer and model''' ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) _UpperCAmelCase = tokenizer.eos_token _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) _UpperCAmelCase = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _UpperCAmelCase = False _UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) ) def tokenize(_SCREAMING_SNAKE_CASE : Any ): _UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 ) _UpperCAmelCase = labels.straint(example['''complexity'''] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], 
"label": label, } _UpperCAmelCase = train_test_validation.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , ) _UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , ) _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , ) print('''Training...''' ) trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) ) trainer.train() if __name__ == "__main__": main()
326
1
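The tokenize function above maps each complexity string to an integer class id via a datasets.ClassLabel; the obfuscated `straint` appears to be this dataset's renaming of `str2int` in the upstream script. A standalone sketch of that label-encoding step, assuming the `datasets` library is installed and using invented class names:

from datasets import ClassLabel

labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])

assert labels.str2int("linear") == labels.names.index("linear")
assert labels.int2str(labels.str2int("quadratic")) == "quadratic"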
"""simple docstring""" __A : List[Any] = 256 # Modulus to hash a string __A : int = 1000003 def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) if p_len > t_len: return False _UpperCAmelCase = 0 _UpperCAmelCase = 0 _UpperCAmelCase = 1 # Calculating the hash of pattern and substring of text for i in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _UpperCAmelCase = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _UpperCAmelCase = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def lowercase ( ): '''simple docstring''' _UpperCAmelCase = '''abc1abc12''' _UpperCAmelCase = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' _UpperCAmelCase = '''alskfjaldsk23adsfabcabc''' assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Test 2) _UpperCAmelCase = '''ABABX''' _UpperCAmelCase = '''ABABZABABYABABX''' assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Test 3) _UpperCAmelCase = '''AAAB''' _UpperCAmelCase = '''ABAAAAAB''' assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Test 4) _UpperCAmelCase = '''abcdabcy''' _UpperCAmelCase = '''abcxabcdabxabcdabcdabcy''' assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Test 5) _UpperCAmelCase = '''Lü''' _UpperCAmelCase = '''Lüsai''' assert rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''Lue''' assert not rabin_karp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print('''Success.''' ) if __name__ == "__main__": test_rabin_karp()
326
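The update inside the second loop above is the standard Rabin-Karp rolling hash: drop the leading character's contribution (scaled by modulus_power, i.e. alphabet_size**(p_len - 1) mod modulus), shift by the alphabet size, and add the incoming character. A tiny worked check with the same constants:

alphabet_size, modulus = 256, 1000003

def window_hash(s):
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h

text, p_len = "xab", 2
modulus_power = pow(alphabet_size, p_len - 1, modulus)
h = window_hash(text[:p_len])  # hash of "xa"
rolled = ((h - ord(text[0]) * modulus_power) * alphabet_size + ord(text[p_len])) % modulus
assert rolled == window_hash("ab")  # rolling matches recomputing from scratch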
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return "\n".join( f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
326
1
"""simple docstring""" import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A : List[str] = get_tests_dir("fixtures/test_sentencepiece.model") if is_sentencepiece_available(): import sentencepiece as sp __A : int = 5 __A : str = 10 @require_sentencepiece @require_tokenizers class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = SpeechaTextTokenizer UpperCamelCase__ = False UpperCamelCase__ = True def lowercase__ ( self : Optional[int] )->List[Any]: super().setUp() _UpperCAmelCase = sp.SentencePieceProcessor() spm_model.Load(__UpperCamelCase ) _UpperCAmelCase = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__UpperCamelCase ) )] _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = Path(self.tmpdirname ) save_json(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__UpperCamelCase , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) _UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : Any )->Optional[int]: _UpperCAmelCase = '''<pad>''' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Optional[int]: _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(__UpperCamelCase ) , 1_0_0_1 ) def lowercase__ ( self : int )->Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1 ) def lowercase__ ( self : List[Any] )->List[Any]: _UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) _UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , ) _UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __UpperCamelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) _UpperCAmelCase = tokenizer.convert_tokens_to_ids(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [SPIECE_UNDERLINE + 
'''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def lowercase__ ( self : Tuple )->Optional[int]: # fmt: off _UpperCAmelCase = {'''input_ids''': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , ) @require_sentencepiece class _a ( unittest.TestCase): """simple docstring""" UpperCamelCase__ = """valhalla/s2t_mustc_multilinguial_medium""" UpperCamelCase__ = """C'est trop cool""" UpperCamelCase__ = """Esto es genial""" @classmethod def lowercase__ ( cls : Union[str, Any] )->str: _UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self : int )->Dict: self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 1_1 ) def lowercase__ ( self : int )->str: self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0 ) def lowercase__ ( self : str )->List[str]: self.assertIn(__UpperCamelCase , self.tokenizer.all_special_ids ) _UpperCAmelCase = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2] _UpperCAmelCase = self.tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase ) _UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) self.assertNotIn(self.tokenizer.eos_token , __UpperCamelCase ) def lowercase__ ( self : str )->str: _UpperCAmelCase = '''fr''' _UpperCAmelCase = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , __UpperCamelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def lowercase__ ( self : Optional[int] )->Tuple: _UpperCAmelCase = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) _UpperCAmelCase = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
326
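The setUp in the record above mirrors how Speech2Text builds its vocabulary files: special tokens first, then every SentencePiece piece, zipped with consecutive ids and dumped to JSON. The id-assignment step alone, with invented piece names:

import json

vocab = ["<s>", "<pad>", "</s>", "<unk>"] + ["▁the", "▁a", "t", "est"]
vocab_dict = dict(zip(vocab, range(len(vocab))))

assert vocab_dict["<pad>"] == 1  # matches the '<pad>' -> 1 check in the test
print(json.dumps(vocab_dict, ensure_ascii=False))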
"""simple docstring""" class _a : """simple docstring""" def __init__( self : Tuple , __UpperCamelCase : list[int] )->None: _UpperCAmelCase = len(__UpperCamelCase ) _UpperCAmelCase = [0] * len_array if len_array > 0: _UpperCAmelCase = array[0] for i in range(1 , __UpperCamelCase ): _UpperCAmelCase = self.prefix_sum[i - 1] + array[i] def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool: _UpperCAmelCase = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
326
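Both methods of the class above fall out of one identity: prefix[i] is the sum of the first i + 1 items, so any range sum is a difference of two prefixes, and a contiguous subarray summing to a target exists iff some prefix minus an earlier prefix (or 0) equals the target. A quick function-level check of both uses:

from itertools import accumulate

array = [1, 4, 6, 2, 61, 12]
prefix = list(accumulate(array))

def range_sum(start, end):
    return prefix[end] - (prefix[start - 1] if start > 0 else 0)

assert range_sum(2, 4) == 6 + 2 + 61

def contains_sum(target):
    seen = {0}
    for p in prefix:
        if p - target in seen:
            return True
        seen.add(p)
    return False

assert contains_sum(6 + 2 + 61) and not contains_sum(7)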
1
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int = 100 ): '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = 0 _UpperCAmelCase = n + 1 # maximum limit for a in range(2 , _SCREAMING_SNAKE_CASE ): for b in range(2 , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = a**b # calculates the current power collect_powers.add(_SCREAMING_SNAKE_CASE ) # adds the result to the set return len(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print("Number of terms ", solution(int(str(input()).strip())))
326
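With the default limit the function above answers Project Euler problem 29. The deduplication through a set matters because powers collide; for 2 <= a, b <= 5 only 15 of the 16 products are distinct, which can be checked directly:

powers = {a**b for a in range(2, 6) for b in range(2, 6)}
assert len(powers) == 15  # 2**4 and 4**2 collide at 16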
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
1
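The mmbt module above defers its torch-dependent imports through transformers' _LazyModule, which only materializes a submodule when an attribute is first requested. The core trick is a module-level __getattr__ (PEP 562); a minimal sketch of the mechanism, intended to live in a package's __init__.py rather than run as a script:

import importlib

_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # invoked only for names not already defined in the module
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")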
"""simple docstring""" from __future__ import annotations def lowercase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , ): '''simple docstring''' if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative in a semiconductor''' ) elif hole_conc < 0: raise ValueError('''Hole concentration cannot be negative in a semiconductor''' ) elif intrinsic_conc < 0: raise ValueError( '''Intrinsic concentration cannot be negative in a semiconductor''' ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
326
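Every branch above solves the mass-action law n * p = n_i**2 for whichever carrier quantity was passed as 0. Worked numerically: with hole_conc = 400 and intrinsic_conc = 100, the electron concentration must be 100**2 / 400 = 25, and the intrinsic branch inverts the same relation:

assert 100**2 / 400 == 25.0
assert (25 * 400) ** 0.5 == 100.0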
"""simple docstring""" __A : Tuple = frozenset( [ "prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) __A : Union[str, Any] = frozenset(["prompt", "negative_prompt"]) __A : str = frozenset([]) __A : List[str] = frozenset(["image"]) __A : Optional[Any] = frozenset( [ "image", "height", "width", "guidance_scale", ] ) __A : Optional[int] = frozenset(["image"]) __A : Optional[int] = frozenset( [ "prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) __A : Optional[Any] = frozenset(["prompt", "image", "negative_prompt"]) __A : str = frozenset( [ # Text guided image variation with an image mask "prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) __A : Tuple = frozenset(["prompt", "image", "mask_image", "negative_prompt"]) __A : List[str] = frozenset( [ # image variation with an image mask "image", "mask_image", "height", "width", "guidance_scale", ] ) __A : List[Any] = frozenset(["image", "mask_image"]) __A : List[str] = frozenset( [ "example_image", "image", "mask_image", "height", "width", "guidance_scale", ] ) __A : Tuple = frozenset(["example_image", "image", "mask_image"]) __A : Dict = frozenset(["class_labels"]) __A : str = frozenset(["class_labels"]) __A : str = frozenset(["batch_size"]) __A : Union[str, Any] = frozenset([]) __A : str = frozenset(["batch_size"]) __A : Optional[int] = frozenset([]) __A : Any = frozenset( [ "prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) __A : List[str] = frozenset(["prompt", "negative_prompt"]) __A : Tuple = frozenset(["input_tokens"]) __A : Optional[int] = frozenset(["input_tokens"])
326
1
"""simple docstring""" class _a : """simple docstring""" def __init__( self : Tuple , __UpperCamelCase : list[int] )->None: _UpperCAmelCase = len(__UpperCamelCase ) _UpperCAmelCase = [0] * len_array if len_array > 0: _UpperCAmelCase = array[0] for i in range(1 , __UpperCamelCase ): _UpperCAmelCase = self.prefix_sum[i - 1] + array[i] def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int: if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool: _UpperCAmelCase = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCamelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
326
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : Optional[Any] = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
326
1
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A : str = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main _UpperCAmelCase = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(_SCREAMING_SNAKE_CASE , id=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' if exitstatus == 5: _UpperCAmelCase = 0 # Doctest custom flag to ignore output. __A : List[Any] = doctest.register_optionflag("IGNORE_RESULT") __A : Any = doctest.OutputChecker class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] )->Dict: if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __A : int = CustomOutputChecker __A : List[Any] = HfDoctestModule __A : List[str] = HfDocTestParser
326
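The conftest above registers a custom doctest option flag and an OutputChecker subclass that treats any output as matching when the flag is set. A minimal standalone sketch of the same mechanism; the checker name is invented, and the flag only exists once register_optionflag has run:

import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # accept whatever the example printed
        return super().check_output(want, got, optionflags)

# In a docstring it would be used as:
#     >>> noisy_call()  # doctest: +IGNORE_RESULT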
"""simple docstring""" import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __A : Union[str, Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __A : Tuple = importlib.util.spec_from_file_location( "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __A : List[str] = spec.loader.load_module() __A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)") __A : List[str] = { "CLIPConfigMixin", "DecisionTransformerConfigMixin", "EncoderDecoderConfigMixin", "RagConfigMixin", "SpeechEncoderDecoderConfigMixin", "VisionEncoderDecoderConfigMixin", "VisionTextDualEncoderConfigMixin", } def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [] for config_class in list(CONFIG_MAPPING.values() ): _UpperCAmelCase = False # source code of `config_class` _UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` _UpperCAmelCase , _UpperCAmelCase = checkpoint # verify the checkpoint name corresponds to the checkpoint link _UpperCAmelCase = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: _UpperCAmelCase = True break _UpperCAmelCase = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
326
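The `_re_checkpoint` pattern above pulls (checkpoint name, link) pairs out of a config docstring so the name can be verified against the link. On a sample docstring fragment:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
ckpt_name, ckpt_link = _re_checkpoint.findall(doc)[0]
assert ckpt_link == f"https://huggingface.co/{ckpt_name}"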
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __A : Tuple = logging.get_logger(__name__) __A : str = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } __A : List[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = {} with open(_SCREAMING_SNAKE_CASE , '''r''' ) as file: for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = line.strip() if line: _UpperCAmelCase = line.split() _UpperCAmelCase = line_number _UpperCAmelCase = words[0] _UpperCAmelCase = value return result def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' for attribute in key.split('''.''' ): _UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]] _UpperCAmelCase = '''param''' if weight_type is not None and weight_type != "param": _UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape elif weight_type is not None and weight_type == "param": _UpperCAmelCase = hf_pointer for attribute in hf_param_name.split('''.''' ): _UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = shape_pointer.shape # let's reduce dimension _UpperCAmelCase = value[0] else: _UpperCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _UpperCAmelCase = value elif weight_type == "weight_g": _UpperCAmelCase = value elif weight_type == "weight_v": _UpperCAmelCase = value elif weight_type == "bias": _UpperCAmelCase = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): _UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = value else: _UpperCAmelCase = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' _UpperCAmelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]] _UpperCAmelCase = '''param''' if weight_type is not None and weight_type != "param": _UpperCAmelCase = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": _UpperCAmelCase = '''.'''.join([key, hf_param_name] ) else: _UpperCAmelCase = key _UpperCAmelCase = value if '''lm_head''' in full_key else value[0] __A : Any = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=None ): '''simple docstring''' _UpperCAmelCase = False for key, mapped_key in MAPPING.items(): _UpperCAmelCase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCAmelCase = True if "*" in mapped_key: _UpperCAmelCase = name.split(_SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2] _UpperCAmelCase = mapped_key.replace('''*''' , _SCREAMING_SNAKE_CASE ) if "weight_g" in name: _UpperCAmelCase = '''weight_g''' elif "weight_v" in name: _UpperCAmelCase = '''weight_v''' elif "bias" in name: _UpperCAmelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj _UpperCAmelCase = '''weight''' else: _UpperCAmelCase = None if hf_dict is not None: rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return is_used return is_used def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = fairseq_model.state_dict() _UpperCAmelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): _UpperCAmelCase = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCAmelCase = True else: _UpperCAmelCase = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f'Unused weights: {unused_weights}' ) def 
lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = full_name.split('''conv_layers.''' )[-1] _UpperCAmelCase = name.split('''.''' ) _UpperCAmelCase = int(items[0] ) _UpperCAmelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _UpperCAmelCase = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : List[str]=False ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = WavaVecaConfig() if is_seq_class: _UpperCAmelCase = read_txt_into_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = idalabel _UpperCAmelCase = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) elif is_finetuned: if dict_path: _UpperCAmelCase = Dictionary.load(_SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _UpperCAmelCase = target_dict.pad_index _UpperCAmelCase = target_dict.bos_index _UpperCAmelCase = target_dict.eos_index _UpperCAmelCase = len(target_dict.symbols ) _UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , '''vocab.json''' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_SCREAMING_SNAKE_CASE ) ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = target_dict.indices # fairseq has the <pad> and <s> switched _UpperCAmelCase = 0 _UpperCAmelCase = 1 with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = WavaVecaCTCTokenizer( _SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = True if config.feat_extract_norm == '''layer''' else False _UpperCAmelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = WavaVecaForCTC(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE ) if is_finetuned or is_seq_class: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _UpperCAmelCase = argparse.Namespace(task='''audio_pretraining''' ) _UpperCAmelCase = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model[0].eval() recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") 
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) __A : List[Any] = parser.parse_args() __A : Optional[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
326
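Much of the conversion script above is dotted-path plumbing: the `set_recursively`-style helper walks a key such as "encoder.layers.0.attention.k_proj" with repeated getattr before assigning the fairseq tensor. The traversal in isolation, with toy namespace objects standing in for the model:

from types import SimpleNamespace

model = SimpleNamespace(encoder=SimpleNamespace(layer_norm=SimpleNamespace(weight=None)))

def set_by_path(root, dotted_key, value):
    *parents, leaf = dotted_key.split(".")
    obj = root
    for name in parents:
        obj = getattr(obj, name)  # descend one attribute per path segment
    setattr(obj, leaf, value)

set_by_path(model, "encoder.layer_norm.weight", [1.0, 2.0])
assert model.encoder.layer_norm.weight == [1.0, 2.0]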
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if bit_count < 0: raise ValueError('''The given input must be positive''' ) # get the generated string sequence _UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE ) # # convert them to integers for i in range(len(_SCREAMING_SNAKE_CASE ) ): _UpperCAmelCase = int(sequence[i] , 2 ) return sequence def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] _UpperCAmelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits _UpperCAmelCase = gray_code_sequence_string(bit_count - 1 ) _UpperCAmelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): _UpperCAmelCase = '''0''' + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): _UpperCAmelCase = '''1''' + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
326
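The reflect-and-prefix recursion above is easy to verify by hand: for 2 bits the 1-bit sequence ["0", "1"] becomes ["00", "01", "11", "10"], i.e. [0, 1, 3, 2], and successive values differ in exactly one bit. The same sequence also falls out of the closed form i ^ (i >> 1):

n = 2
gray = [i ^ (i >> 1) for i in range(1 << n)]
assert gray == [0, 1, 3, 2]
assert all(bin(a ^ b).count("1") == 1 for a, b in zip(gray, gray[1:]))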
1
"""simple docstring""" from __future__ import annotations def lowercase ( _SCREAMING_SNAKE_CASE : tuple[int, int] , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = position _UpperCAmelCase = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _UpperCAmelCase = [] for position in positions: _UpperCAmelCase , _UpperCAmelCase = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_SCREAMING_SNAKE_CASE ) return permissible_positions def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] ): '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : tuple[int, int] , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if is_complete(_SCREAMING_SNAKE_CASE ): return True for position in get_valid_pos(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ): _UpperCAmelCase , _UpperCAmelCase = position if board[y][x] == 0: _UpperCAmelCase = curr + 1 if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , curr + 1 ): return True _UpperCAmelCase = 0 return False def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = [[0 for i in range(_SCREAMING_SNAKE_CASE )] for j in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 1 if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , (i, j) , 1 ): return board _UpperCAmelCase = 0 _UpperCAmelCase = f'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
326
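The move generator above offsets the current square by the eight (±1, ±2) knight jumps and keeps only those inside the n x n board. Counting candidates from a corner versus the middle of a 5x5 board makes the boundary filter concrete:

moves = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]

def valid_moves(y, x, n=5):
    return [(y + dy, x + dx) for dy, dx in moves if 0 <= y + dy < n and 0 <= x + dx < n]

assert len(valid_moves(0, 0)) == 2  # a corner square has only two exits
assert len(valid_moves(2, 2)) == 8  # the centre keeps all eight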
"""simple docstring""" import math def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ): '''simple docstring''' _UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = i _UpperCAmelCase = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _UpperCAmelCase = array[temp_index - 1] temp_index -= 1 _UpperCAmelCase = temp_index_value return array def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap '''simple docstring''' _UpperCAmelCase = index _UpperCAmelCase = 2 * index + 1 # Left Node _UpperCAmelCase = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _UpperCAmelCase = left_index if right_index < heap_size and array[largest] < array[right_index]: _UpperCAmelCase = right_index if largest != index: _UpperCAmelCase , _UpperCAmelCase = array[largest], array[index] heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) for i in range(n // 2 , -1 , -1 ): heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for i in range(n - 1 , 0 , -1 ): _UpperCAmelCase , _UpperCAmelCase = array[0], array[i] heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE ) return array def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = low _UpperCAmelCase = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _UpperCAmelCase , _UpperCAmelCase = array[j], array[i] i += 1 def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) == 0: return array _UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) ) _UpperCAmelCase = 16 return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(_SCREAMING_SNAKE_CASE ) max_depth -= 1 _UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 ) _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = p return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() __A : List[str] = input("Enter 
numbers separated by a comma : ").strip() __A : Optional[Any] = [float(item) for item in user_input.split(",")] print(sort(unsorted))
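# Usage sketch (added for illustration; not part of the original file): introsort
# runs median-of-3 quicksort, falls back to heap sort when the depth limit is
# exhausted, and finishes small ranges (<= 16 elements) with insertion sort.
def _demo_intro_sort() -> None:
    import random

    data = [random.randrange(1000) for _ in range(500)]
    expected = sorted(data)
    assert sort(data) == expected  # sort() mutates the list in place and returns it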
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __A : str = logging.get_logger(__name__) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple=False ): '''simple docstring''' _UpperCAmelCase = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') ) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') ) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') ) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') ) 
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') ) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) # fmt: on return rename_keys def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase = '''''' else: _UpperCAmelCase = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) _UpperCAmelCase = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = ['''head.weight''', 
'''head.bias'''] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = dct.pop(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = val def lowercase ( ): '''simple docstring''' _UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _UpperCAmelCase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any]=False ): '''simple docstring''' _UpperCAmelCase = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = ViTHybridConfig(backbone_config=_SCREAMING_SNAKE_CASE , image_size=384 , num_labels=1000 ) _UpperCAmelCase = False # load original model from timm _UpperCAmelCase = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase = timm_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''huggingface/label-files''' _UpperCAmelCase = '''imagenet-1k-id2label.json''' _UpperCAmelCase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) _UpperCAmelCase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCAmelCase = ViTHybridModel(_SCREAMING_SNAKE_CASE ).eval() else: _UpperCAmelCase = ViTHybridForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # create image processor _UpperCAmelCase = create_transform(**resolve_data_config({} , model=_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = transform.transforms _UpperCAmelCase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } _UpperCAmelCase = ViTHybridImageProcessor( do_resize=_SCREAMING_SNAKE_CASE , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_SCREAMING_SNAKE_CASE , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_SCREAMING_SNAKE_CASE , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = transform(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) _UpperCAmelCase = processor(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # verify logits with torch.no_grad(): _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits print('''Predicted class:''' , logits.argmax(-1 ).item() ) if base_model: 
_UpperCAmelCase = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 ) else: _UpperCAmelCase = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print(f'Pushing model and processor to the hub {vit_name}' ) model.push_to_hub(f'ybelkada/{vit_name}' ) processor.push_to_hub(f'ybelkada/{vit_name}' ) if __name__ == "__main__": __A : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_r50_s16_384", type=str, help="Name of the hybrid ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) __A : Tuple = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
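# Usage sketch (added for illustration; not part of the original script): the
# conversion can also be driven programmatically instead of via the CLI. The
# dump folder path below is hypothetical; the call downloads the timm checkpoint
# and the ImageNet label file, so it needs network access.
def _demo_convert() -> None:
    convert_vit_checkpoint("vit_base_r50_s16_384", "./vit-hybrid-base-converted", push_to_hub=False)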
"""simple docstring""" from __future__ import annotations import numpy as np def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE ) if rows != columns: _UpperCAmelCase = ( '''\'table\' has to be of square shaped array but got a ''' f'{rows}x{columns} array:\n{table}' ) raise ValueError(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros((rows, columns) ) _UpperCAmelCase = np.zeros((rows, columns) ) for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) ) if upper[j][j] == 0: raise ArithmeticError('''No LU decomposition exists''' ) _UpperCAmelCase = (table[i][j] - total) / upper[j][j] _UpperCAmelCase = 1 for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Optional[Any] = { "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """data2vec-audio""" def __init__( self : List[str] , __UpperCamelCase : Tuple=3_2 , __UpperCamelCase : int=7_6_8 , __UpperCamelCase : List[str]=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : List[Any]=1e-5 , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : Union[str, Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCamelCase : str=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase : List[str]=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCamelCase : int=False , __UpperCamelCase : Dict=1_6 , __UpperCamelCase : Any=1_9 , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Union[str, Any]=0.0_5 , __UpperCamelCase : int=1_0 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Union[str, Any]=1_0 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : int="sum" , __UpperCamelCase : List[Any]=False , __UpperCamelCase : List[Any]=False , __UpperCamelCase : Tuple=2_5_6 , __UpperCamelCase : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __UpperCamelCase : Optional[int]=(5, 3, 3, 1, 1) , __UpperCamelCase : int=(1, 2, 3, 1, 1) , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : Dict=1 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Tuple=3 , __UpperCamelCase : Dict=2 , __UpperCamelCase : List[str]=3 , __UpperCamelCase : Union[str, Any]=None , **__UpperCamelCase : Optional[int] , )->List[Any]: super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = feat_extract_activation _UpperCAmelCase = list(__UpperCamelCase ) _UpperCAmelCase = list(__UpperCamelCase ) _UpperCAmelCase = list(__UpperCamelCase ) _UpperCAmelCase = conv_bias _UpperCAmelCase = num_conv_pos_embeddings _UpperCAmelCase = num_conv_pos_embedding_groups _UpperCAmelCase = conv_pos_kernel_size _UpperCAmelCase = len(self.conv_dim ) _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = feat_proj_dropout _UpperCAmelCase = final_dropout _UpperCAmelCase = layerdrop _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = vocab_size _UpperCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase = mask_time_prob _UpperCAmelCase = mask_time_length _UpperCAmelCase = mask_time_min_masks _UpperCAmelCase = mask_feature_prob _UpperCAmelCase = mask_feature_length _UpperCAmelCase = mask_feature_min_masks # ctc loss _UpperCAmelCase = ctc_loss_reduction _UpperCAmelCase = ctc_zero_infinity # adapter _UpperCAmelCase = add_adapter _UpperCAmelCase = adapter_kernel_size _UpperCAmelCase = adapter_stride _UpperCAmelCase = num_adapter_layers _UpperCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _UpperCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _UpperCAmelCase = list(__UpperCamelCase ) _UpperCAmelCase = list(__UpperCamelCase ) _UpperCAmelCase = list(__UpperCamelCase ) _UpperCAmelCase = xvector_output_dim @property def lowercase__ ( self : Union[str, Any] )->Optional[int]: return math.prod(self.conv_stride )
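# Usage sketch (added for illustration; not part of the original file): the
# config instantiates with defaults, and the stride property gives the overall
# downsampling factor of the convolutional feature extractor.
def _demo_config() -> None:
    config = Data2VecAudioConfig()
    print(config.num_feat_extract_layers)  # 7 conv layers by default
    print(config.inputs_to_logits_ratio)   # 5*2*2*2*2*2*2 = 320 input samples per frame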
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = CTRLTokenizer UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Dict )->str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] _UpperCAmelCase = {'''unk_token''': '''<unk>'''} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCamelCase ) ) def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = '''adapt react readapt apt''' _UpperCAmelCase = '''adapt react readapt apt''' return input_text, output_text def lowercase__ ( self : Dict )->Optional[int]: _UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCAmelCase = '''adapt react readapt apt''' _UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A : List[Any] = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = ["YolosFeatureExtractor"] __A : Dict = ["YolosImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = [ "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", "YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys __A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import logging import os from .state import PartialState class _a ( logging.LoggerAdapter): """simple docstring""" @staticmethod def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int: if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) _UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase ) _UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase ) if self.isEnabledFor(__UpperCamelCase ): if self._should_log(__UpperCamelCase ): _UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) elif in_order: _UpperCAmelCase = PartialState() for i in range(state.num_processes ): if i == state.process_index: _UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) state.wait_for_everyone() def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ): '''simple docstring''' if log_level is None: _UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _a : """simple docstring""" def __init__( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=1_3 , __UpperCamelCase : Optional[int]=3_0 , __UpperCamelCase : Any=2 , __UpperCamelCase : int=3 , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=3_2 , __UpperCamelCase : Dict=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[int]=3_7 , __UpperCamelCase : int="gelu" , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Any=1_0 , __UpperCamelCase : str=0.0_2 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=2 , )->str: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = scope _UpperCAmelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _UpperCAmelCase = (image_size // patch_size) ** 2 _UpperCAmelCase = num_patches + 2 def lowercase__ ( self : List[str] )->str: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowercase__ ( self : Dict )->Optional[Any]: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] 
)->Dict: _UpperCAmelCase = DeiTModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : List[str] )->Tuple: _UpperCAmelCase = DeiTForMaskedImageModeling(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = DeiTForMaskedImageModeling(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase__ ( self : int , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Dict )->Optional[Any]: _UpperCAmelCase = self.type_sequence_label_size _UpperCAmelCase = DeiTForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = DeiTForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : Optional[int] )->List[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = DeiTModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=3_7 ) def lowercase__ ( self : List[Any] )->List[str]: self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def lowercase__ ( self : Dict )->str: pass def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def lowercase__ ( 
self : Union[str, Any] )->List[str]: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowercase__ ( self : Optional[int] )->str: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->List[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : str=False )->Dict: _UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowercase__ ( self : str )->Union[str, Any]: if not self.model_tester.is_training: return _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__UpperCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() _UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) _UpperCAmelCase = model(**__UpperCamelCase ).loss loss.backward() def lowercase__ ( self : List[Any] )->Tuple: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _UpperCAmelCase = False _UpperCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue _UpperCAmelCase = model_class(__UpperCamelCase ) model.gradient_checkpointing_enable() model.to(__UpperCamelCase ) model.train() _UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) _UpperCAmelCase = model(**__UpperCamelCase ).loss loss.backward() def lowercase__ ( self : Any )->Dict: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__UpperCamelCase ), *get_values(__UpperCamelCase ), ] or 
model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ): _UpperCAmelCase = problem_type['''title'''] _UpperCAmelCase = problem_type['''num_labels'''] _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() _UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if problem_type["num_labels"] > 1: _UpperCAmelCase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) _UpperCAmelCase = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list: _UpperCAmelCase = model(**__UpperCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def lowercase__ ( self : int )->Optional[Any]: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = DeiTModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _a ( unittest.TestCase): """simple docstring""" @cached_property def lowercase__ ( self : Union[str, Any] )->Tuple: return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to( __UpperCamelCase ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**__UpperCamelCase ) # verify the logits _UpperCAmelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCAmelCase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def lowercase__ ( self : Optional[int] )->str: _UpperCAmelCase = DeiTModel.from_pretrained( '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ) _UpperCAmelCase = inputs.pixel_values.to(__UpperCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase )
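# Usage sketch (added for illustration; not part of the original test file): a
# minimal inference pass mirroring the integration test above; it downloads the
# checkpoint from the Hub and needs torch + vision installed.
def _demo_deit_inference() -> None:
    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])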
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __A : List[Any] = logging.get_logger(__name__) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = ["""pixel_values"""] def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6} _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _UpperCAmelCase = get_size_dict(__UpperCamelCase ) _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray: _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}' ) _UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray: _UpperCAmelCase = get_size_dict(__UpperCamelCase ) return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray: return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray: return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]: _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(__UpperCamelCase ) _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] _UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
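# Usage sketch (added for illustration; not part of the original file): the
# preprocess pipeline is resize (shortest edge to 256) -> center crop (224x224)
# -> rescale to [0, 1] -> normalize, returning a BatchFeature of pixel values.
# "ImageProcessor" is the stand-in class name used above.
def _demo_preprocess() -> None:
    processor = ImageProcessor()
    image = np.zeros((300, 400, 3), dtype=np.uint8)  # any HWC uint8 image works
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)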
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __A : Any = logging.get_logger(__name__) __A : str = {"vocab_file": "vocab.txt"} __A : str = { "vocab_file": { "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt", "YituTech/conv-bert-medium-small": ( "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt" ), "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt", } } __A : List[Any] = { "YituTech/conv-bert-base": 512, "YituTech/conv-bert-medium-small": 512, "YituTech/conv-bert-small": 512, } __A : List[Any] = { "YituTech/conv-bert-base": {"do_lower_case": True}, "YituTech/conv-bert-medium-small": {"do_lower_case": True}, "YituTech/conv-bert-small": {"do_lower_case": True}, } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = ConvBertTokenizer def __init__( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : List[str]="[UNK]" , __UpperCamelCase : str="[SEP]" , __UpperCamelCase : Optional[Any]="[PAD]" , __UpperCamelCase : Dict="[CLS]" , __UpperCamelCase : List[str]="[MASK]" , __UpperCamelCase : Tuple=True , __UpperCamelCase : Tuple=None , **__UpperCamelCase : str , )->str: super().__init__( __UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , tokenize_chinese_chars=__UpperCamelCase , strip_accents=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __UpperCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __UpperCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __UpperCamelCase ) != tokenize_chinese_chars ): _UpperCAmelCase = getattr(__UpperCamelCase , normalizer_state.pop('''type''' ) ) _UpperCAmelCase = do_lower_case _UpperCAmelCase = strip_accents _UpperCAmelCase = tokenize_chinese_chars _UpperCAmelCase = normalizer_class(**__UpperCamelCase ) _UpperCAmelCase = do_lower_case def lowercase__ ( self : int , __UpperCamelCase : Any , __UpperCamelCase : List[Any]=None )->Optional[int]: _UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None )->List[int]: _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : int , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None )->Tuple[str]: _UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __A : List[Any] = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : """simple docstring""" @staticmethod def lowercase__ ( *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict )->List[str]: pass @is_pipeline_test @require_vision class _a ( unittest.TestCase): """simple docstring""" @require_torch def lowercase__ ( self : Dict )->str: _UpperCAmelCase = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCAmelCase = image_classifier(__UpperCamelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__UpperCamelCase ) , [ [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}], [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}], ] , ) _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], ] , ) @require_tf def lowercase__ ( self : Any )->Optional[Any]: _UpperCAmelCase = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCAmelCase = image_classifier(__UpperCamelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , ) _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, 
'''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], [ {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, {'''score''': 0.3_3_3, '''label''': ANY(__UpperCamelCase )}, ], ] , ) @slow @require_torch def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCAmelCase = image_classifier(__UpperCamelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ] , ) _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def lowercase__ ( self : Tuple )->str: _UpperCAmelCase = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCAmelCase = image_classifier(__UpperCamelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ] , ) _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ [ {'''score''': 0.5_1_1, '''label''': '''remote'''}, {'''score''': 0.4_8_5, '''label''': '''cat'''}, {'''score''': 0.0_0_4, '''label''': '''plane'''}, ], ] * 5 , )
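# Usage sketch (added for illustration; not part of the original test file): the
# pipeline exercised above can be used directly; it downloads the CLIP checkpoint.
def _demo_zero_shot() -> None:
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    print(classifier(image, candidate_labels=["cat", "plane", "remote"]))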
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = None UpperCamelCase__ = None __A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess") def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ): raise ValueError('''The nodes number should be same as the number of coins''' ) # Main calculation def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) _UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left ) _UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right ) _UpperCAmelCase = 1 - left_distrib_excess _UpperCAmelCase = 1 - right_distrib_excess _UpperCAmelCase = ( left_distrib_moves + right_distrib_moves + abs(_SCREAMING_SNAKE_CASE ) + abs(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return get_distrib(_SCREAMING_SNAKE_CASE )[0] if __name__ == "__main__": import doctest doctest.testmod()
326
1
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : Optional[int] )->Any: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCamelCase , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__UpperCamelCase , '''num_attention_heads''' ) ) self.parent.assertTrue(hasattr(__UpperCamelCase , '''num_encoder_blocks''' ) ) class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str=1_3 , __UpperCamelCase : str=6_4 , __UpperCamelCase : int=3 , __UpperCamelCase : int=4 , __UpperCamelCase : int=[2, 2, 2, 2] , __UpperCamelCase : Optional[int]=[8, 4, 2, 1] , __UpperCamelCase : List[Any]=[1_6, 3_2, 6_4, 1_2_8] , __UpperCamelCase : str=[1, 4, 8, 1_6] , __UpperCamelCase : List[Any]=[1, 2, 4, 8] , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : str=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Union[str, Any]=None , )->Union[str, Any]: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = sr_ratios _UpperCAmelCase = depths _UpperCAmelCase = hidden_sizes _UpperCAmelCase = downsampling_rates _UpperCAmelCase = num_attention_heads _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = scope def lowercase__ ( self : int )->Tuple: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowercase__ ( self : str )->Union[str, Any]: return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : Tuple )->Optional[Any]: _UpperCAmelCase = SegformerModel(config=__UpperCamelCase ) 
model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = _UpperCAmelCase = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] )->int: _UpperCAmelCase = self.num_labels _UpperCAmelCase = SegformerForSemanticSegmentation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def lowercase__ ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] )->str: _UpperCAmelCase = 1 _UpperCAmelCase = SegformerForSemanticSegmentation(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertGreater(result.loss , 0.0 ) def lowercase__ ( self : Optional[Any] )->List[str]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": SegformerModel, """image-classification""": SegformerForImageClassification, """image-segmentation""": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : str )->Dict: _UpperCAmelCase = SegformerModelTester(self ) _UpperCAmelCase = SegformerConfigTester(self , config_class=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.config_tester.run_common_tests() def lowercase__ ( self : Dict )->Tuple: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def lowercase__ ( self : Dict )->List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*__UpperCamelCase ) @unittest.skip('''SegFormer does not use inputs_embeds''' ) def lowercase__ ( self : int )->Optional[Any]: pass @unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' ) def lowercase__ ( self : Optional[int] )->Optional[Any]: pass def lowercase__ ( self : int )->Optional[int]: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def lowercase__ ( self : List[str] )->int: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions _UpperCAmelCase = sum(self.model_tester.depths ) self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # verify the first attentions (first block, first layer) _UpperCAmelCase = (self.model_tester.image_size // 4) ** 2 _UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) _UpperCAmelCase = (self.model_tester.image_size // 3_2) ** 2 _UpperCAmelCase = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) _UpperCAmelCase = len(__UpperCamelCase ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(out_len + 1 , len(__UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # verify the first attentions (first block, first layer) _UpperCAmelCase = (self.model_tester.image_size // 4) ** 2 _UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def lowercase__ ( self : Optional[int] )->List[str]: def check_hidden_states_output(__UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ): _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = self.model_tester.num_encoder_blocks self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # verify the first hidden states (first block) 
self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->int: if not self.model_tester.is_training: return _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True for model_class in self.all_model_classes: if model_class in get_values(__UpperCamelCase ): continue _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() _UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) _UpperCAmelCase = model(**__UpperCamelCase ).loss loss.backward() @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase__ ( self : Any )->Any: pass @slow def lowercase__ ( self : List[Any] )->str: for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = SegformerModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class _a ( unittest.TestCase): """simple docstring""" @slow def lowercase__ ( self : Dict )->int: # only resize + normalize _UpperCAmelCase = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=__UpperCamelCase , align=__UpperCamelCase , do_random_crop=__UpperCamelCase ) _UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to( __UpperCamelCase ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ) _UpperCAmelCase = encoded_inputs.pixel_values.to(__UpperCamelCase ) with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCAmelCase = torch.tensor( [ [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]], [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]], [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]], ] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __UpperCamelCase , atol=1e-4 ) ) @slow def lowercase__ ( self : str )->int: # only resize + normalize _UpperCAmelCase = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=__UpperCamelCase , align=__UpperCamelCase , do_random_crop=__UpperCamelCase ) _UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained( '''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__UpperCamelCase ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' 
) _UpperCAmelCase = encoded_inputs.pixel_values.to(__UpperCamelCase ) with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCAmelCase = torch.tensor( [ [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]], [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]], [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]], ] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __UpperCamelCase , atol=1e-1 ) ) @slow def lowercase__ ( self : Any )->Optional[int]: # only resize + normalize _UpperCAmelCase = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=__UpperCamelCase , align=__UpperCamelCase , do_random_crop=__UpperCamelCase ) _UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to( __UpperCamelCase ) _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ) _UpperCAmelCase = encoded_inputs.pixel_values.to(__UpperCamelCase ) with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = outputs.logits.detach().cpu() _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase , target_sizes=[(5_0_0, 3_0_0)] ) _UpperCAmelCase = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , __UpperCamelCase ) _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase ) _UpperCAmelCase = torch.Size((1_2_8, 1_2_8) ) self.assertEqual(segmentation[0].shape , __UpperCamelCase )
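A condensed sketch of the inference path the slow tests above verify: preprocess with SegformerImageProcessor, run the forward pass, then resize the (1, num_labels, H/4, W/4) logits back to the input size. The checkpoint and fixture path come from the tests; the rest is standard usage, not the tests' exact code.

import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# post_process_semantic_segmentation upsamples the logits and takes the argmax.
# image.size is (width, height); target_sizes expects (height, width).
segmentation = processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)  # a (height, width) map of ADE20K class indices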
326
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]: _UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = 
type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple: _UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]: _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : 
Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]: _UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]: _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict: _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]: _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase ) _UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( 
self : List[str] )->Optional[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def lowercase__ ( self : List[Any] )->str: _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 ) def lowercase__ ( self : List[Any] )->List[str]: self.config_tester.run_common_tests() def lowercase__ ( self : Optional[Any] )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase ) def lowercase__ ( self : Any )->Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase ) def lowercase__ ( self : str )->Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase ) def lowercase__ ( self : Any )->List[str]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase ) def lowercase__ ( self : Dict )->Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase ) def lowercase__ ( self : Any )->Optional[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase ) def lowercase__ ( self : List[str] )->Tuple: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase ) @slow def lowercase__ ( self : Tuple )->List[str]: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_tf class _a ( unittest.TestCase): """simple docstring""" @slow def lowercase__ ( self : str )->Dict: _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__UpperCamelCase )[0] _UpperCAmelCase = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , __UpperCamelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6], [-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7], [-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
326
1
"""simple docstring""" import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa __A : Tuple = logging.getLogger(__name__) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """summarization""" UpperCamelCase__ = ["""loss"""] UpperCamelCase__ = ROUGE_KEYS UpperCamelCase__ = """rouge2""" def __init__( self : str , __UpperCamelCase : Any , **__UpperCamelCase : List[Any] )->List[Any]: if hparams.sortish_sampler and hparams.gpus > 1: _UpperCAmelCase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' ) if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' ) super().__init__(__UpperCamelCase , num_labels=__UpperCamelCase , mode=self.mode , **__UpperCamelCase ) use_task_specific_params(self.model , '''summarization''' ) save_git_info(self.hparams.output_dir ) _UpperCAmelCase = Path(self.output_dir ) / '''metrics.json''' _UpperCAmelCase = Path(self.output_dir ) / '''hparams.pkl''' pickle_save(self.hparams , self.hparams_save_path ) _UpperCAmelCase = 0 _UpperCAmelCase = defaultdict(__UpperCamelCase ) _UpperCAmelCase = self.config.model_type _UpperCAmelCase = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size _UpperCAmelCase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } _UpperCAmelCase = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } _UpperCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} _UpperCAmelCase = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}' assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) _UpperCAmelCase = get_git_info()['''repo_sha'''] _UpperCAmelCase = hparams.num_workers _UpperCAmelCase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __UpperCamelCase ): _UpperCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] _UpperCAmelCase = self.decoder_start_token_id 
_UpperCAmelCase = ( SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset ) _UpperCAmelCase = False _UpperCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: _UpperCAmelCase = self.hparams.eval_max_gen_length else: _UpperCAmelCase = self.model.config.max_length _UpperCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def lowercase__ ( self : int , __UpperCamelCase : Dict[str, torch.Tensor] )->Dict[str, List[str]]: _UpperCAmelCase = { k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(__UpperCamelCase , Path(self.output_dir ) / '''text_batch.json''' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' ) _UpperCAmelCase = True return readable_batch def lowercase__ ( self : Dict , __UpperCamelCase : Optional[Any] , **__UpperCamelCase : int )->Optional[Any]: return self.model(__UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[int] )->Optional[Any]: _UpperCAmelCase = self.tokenizer.batch_decode( __UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase ) return lmap(str.strip , __UpperCamelCase ) def lowercase__ ( self : Tuple , __UpperCamelCase : dict )->Tuple: _UpperCAmelCase = self.tokenizer.pad_token_id _UpperCAmelCase , _UpperCAmelCase = batch['''input_ids'''], batch['''attention_mask'''] _UpperCAmelCase = batch['''labels'''] if isinstance(self.model , __UpperCamelCase ): _UpperCAmelCase = self.model._shift_right(__UpperCamelCase ) else: _UpperCAmelCase = shift_tokens_right(__UpperCamelCase , __UpperCamelCase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero _UpperCAmelCase = decoder_input_ids self.save_readable_batch(__UpperCamelCase ) _UpperCAmelCase = self(__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , use_cache=__UpperCamelCase ) _UpperCAmelCase = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id _UpperCAmelCase = nn.CrossEntropyLoss(ignore_index=__UpperCamelCase ) assert lm_logits.shape[-1] == self.vocab_size _UpperCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: _UpperCAmelCase = nn.functional.log_softmax(__UpperCamelCase , dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = label_smoothed_nll_loss( __UpperCamelCase , __UpperCamelCase , self.hparams.label_smoothing , ignore_index=__UpperCamelCase ) return (loss,) @property def lowercase__ ( self : Union[str, Any] )->int: return self.tokenizer.pad_token_id def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] )->Dict: _UpperCAmelCase = self._step(__UpperCamelCase ) _UpperCAmelCase = dict(zip(self.loss_names , __UpperCamelCase ) ) # tokens per batch _UpperCAmelCase = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum() _UpperCAmelCase = batch['''input_ids'''].shape[0] _UpperCAmelCase = batch['''input_ids'''].eq(self.pad ).sum() _UpperCAmelCase = batch['''input_ids'''].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def lowercase__ ( self : str , __UpperCamelCase : Optional[Any] 
, __UpperCamelCase : str )->Dict: return self._generative_step(__UpperCamelCase ) def lowercase__ ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]="val" )->Dict: self.step_count += 1 _UpperCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} _UpperCAmelCase = losses['''loss'''] _UpperCAmelCase = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } _UpperCAmelCase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) _UpperCAmelCase = torch.tensor(__UpperCamelCase ).type_as(__UpperCamelCase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(__UpperCamelCase ) _UpperCAmelCase = {F'{prefix}_avg_{k}': x for k, x in losses.items()} _UpperCAmelCase = self.step_count self.metrics[prefix].append(__UpperCamelCase ) # callback writes this to self.metrics_save_path _UpperCAmelCase = flatten_list([x['''preds'''] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'{prefix}_loss': loss, F'{prefix}_{self.val_metric}': metric_tensor, } def lowercase__ ( self : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict )->Dict: return calculate_rouge(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str , __UpperCamelCase : dict )->dict: _UpperCAmelCase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') _UpperCAmelCase = self.model.generate( batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=__UpperCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) _UpperCAmelCase = (time.time() - ta) / batch['''input_ids'''].shape[0] _UpperCAmelCase = self.ids_to_clean_text(__UpperCamelCase ) _UpperCAmelCase = self.ids_to_clean_text(batch['''labels'''] ) _UpperCAmelCase = self._step(__UpperCamelCase ) _UpperCAmelCase = dict(zip(self.loss_names , __UpperCamelCase ) ) _UpperCAmelCase = self.calc_generative_metrics(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = np.mean(lmap(__UpperCamelCase , __UpperCamelCase ) ) base_metrics.update(gen_time=__UpperCamelCase , gen_len=__UpperCamelCase , preds=__UpperCamelCase , target=__UpperCamelCase , **__UpperCamelCase ) return base_metrics def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str )->List[str]: return self._generative_step(__UpperCamelCase ) def lowercase__ ( self : List[str] , __UpperCamelCase : Optional[int] )->Optional[Any]: return self.validation_epoch_end(__UpperCamelCase , prefix='''test''' ) def lowercase__ ( self : str , __UpperCamelCase : int )->SeqaSeqDataset: _UpperCAmelCase = self.n_obs[type_path] _UpperCAmelCase = self.target_lens[type_path] _UpperCAmelCase = self.dataset_class( self.tokenizer , type_path=__UpperCamelCase , n_obs=__UpperCamelCase , max_target_length=__UpperCamelCase , **self.dataset_kwargs , ) return dataset def lowercase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : bool = False )->DataLoader: _UpperCAmelCase = self.get_dataset(__UpperCamelCase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": _UpperCAmelCase = dataset.make_sortish_sampler(__UpperCamelCase , distributed=self.hparams.gpus > 1 ) return DataLoader( __UpperCamelCase , batch_size=__UpperCamelCase , 
collate_fn=dataset.collate_fn , shuffle=__UpperCamelCase , num_workers=self.num_workers , sampler=__UpperCamelCase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": _UpperCAmelCase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( __UpperCamelCase , batch_sampler=__UpperCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( __UpperCamelCase , batch_size=__UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=__UpperCamelCase , num_workers=self.num_workers , sampler=__UpperCamelCase , ) def lowercase__ ( self : Dict )->DataLoader: _UpperCAmelCase = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=__UpperCamelCase ) return dataloader def lowercase__ ( self : List[Any] )->DataLoader: return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size ) def lowercase__ ( self : List[Any] )->DataLoader: return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size ) @staticmethod def lowercase__ ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] )->Optional[Any]: BaseTransformer.add_model_specific_args(__UpperCamelCase , __UpperCamelCase ) add_generic_args(__UpperCamelCase , __UpperCamelCase ) parser.add_argument( '''--max_source_length''' , default=1_0_2_4 , type=__UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--max_target_length''' , default=5_6 , type=__UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--val_max_target_length''' , default=1_4_2 , type=__UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--test_max_target_length''' , default=1_4_2 , type=__UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument('''--freeze_encoder''' , action='''store_true''' ) parser.add_argument('''--freeze_embeds''' , action='''store_true''' ) parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=__UpperCamelCase ) parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=__UpperCamelCase ) parser.add_argument('''--max_tokens_per_batch''' , type=__UpperCamelCase , default=__UpperCamelCase ) parser.add_argument('''--logger_name''' , type=__UpperCamelCase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' ) parser.add_argument('''--n_train''' , type=__UpperCamelCase , default=-1 , required=__UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_val''' , type=__UpperCamelCase , default=5_0_0 , required=__UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_test''' , type=__UpperCamelCase , default=-1 , required=__UpperCamelCase , help='''# examples. 
-1 means use all.''' ) parser.add_argument( '''--task''' , type=__UpperCamelCase , default='''summarization''' , required=__UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--label_smoothing''' , type=__UpperCamelCase , default=0.0 , required=__UpperCamelCase ) parser.add_argument('''--src_lang''' , type=__UpperCamelCase , default='''''' , required=__UpperCamelCase ) parser.add_argument('''--tgt_lang''' , type=__UpperCamelCase , default='''''' , required=__UpperCamelCase ) parser.add_argument('''--eval_beams''' , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase ) parser.add_argument( '''--val_metric''' , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase , choices=['''bleu''', '''rouge2''', '''loss''', None] ) parser.add_argument('''--eval_max_gen_length''' , type=__UpperCamelCase , default=__UpperCamelCase , help='''never generate more than n tokens''' ) parser.add_argument('''--save_top_k''' , type=__UpperCamelCase , default=1 , required=__UpperCamelCase , help='''How many checkpoints to save''' ) parser.add_argument( '''--early_stopping_patience''' , type=__UpperCamelCase , default=-1 , required=__UpperCamelCase , help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will effect it.''' ) , ) return parser class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """translation""" UpperCamelCase__ = ["""loss"""] UpperCamelCase__ = ["""bleu"""] UpperCamelCase__ = """bleu""" def __init__( self : Any , __UpperCamelCase : List[str] , **__UpperCamelCase : List[str] )->Dict: super().__init__(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = hparams.src_lang _UpperCAmelCase = hparams.tgt_lang def lowercase__ ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : int )->dict: return calculate_bleu(__UpperCamelCase , __UpperCamelCase ) def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any]=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) check_output_dir(_SCREAMING_SNAKE_CASE , expected_items=3 ) if model is None: if "summarization" in args.task: _UpperCAmelCase = SummarizationModule(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = TranslationModule(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): _UpperCAmelCase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger _UpperCAmelCase = os.environ.get('''WANDB_PROJECT''' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=_SCREAMING_SNAKE_CASE ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger _UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' ) if args.early_stopping_patience >= 0: _UpperCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: _UpperCAmelCase = False _UpperCAmelCase = args.val_metric == '''loss''' _UpperCAmelCase = generic_train( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , _SCREAMING_SNAKE_CASE ) , 
early_stopping_callback=_SCREAMING_SNAKE_CASE , logger=_SCREAMING_SNAKE_CASE , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model _UpperCAmelCase = '''''' _UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=_SCREAMING_SNAKE_CASE ) ) if checkpoints: _UpperCAmelCase = checkpoints[-1] _UpperCAmelCase = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": __A : str = argparse.ArgumentParser() __A : str = pl.Trainer.add_argparse_args(parser) __A : Optional[int] = SummarizationModule.add_model_specific_args(parser, os.getcwd()) __A : str = parser.parse_args() main(args)
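The _step method above branches on hparams.label_smoothing and calls a label_smoothed_nll_loss helper imported from the sibling utils module, passing log-softmaxed logits, the target ids, the smoothing epsilon, and the pad id as ignore_index. A hedged re-derivation of what that fairseq-style helper typically computes (the repo's exact code may differ): mix the NLL of the gold token with the mean NLL over the whole vocabulary, zeroing out pad positions.

import torch

def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids.
    # ignore_index should be the tokenizer's pad id, i.e. a valid vocab index.
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)   # NLL of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)   # summed NLL over the vocab
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    # convex mix: (1 - eps) * gold-token NLL + eps * uniform-over-vocab NLL
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss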
326
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(_SCREAMING_SNAKE_CASE ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
326
1