code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class snake_case__( a__ ): '''simple docstring''' def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''num_attention_heads''' ) ) class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=6_4 , __lowercase=3 , __lowercase=3 , __lowercase=2 , __lowercase=1 , __lowercase=1_6 , __lowercase=[1_2_8, 2_5_6, 3_8_4] , __lowercase=[4, 6, 8] , __lowercase=[2, 3, 4] , __lowercase=[1_6, 1_6, 1_6] , __lowercase=0 , __lowercase=[2, 2, 2] , __lowercase=[2, 2, 2] , __lowercase=0.02 , __lowercase=True , __lowercase=True , __lowercase=2 , ) -> Union[str, Any]: lowerCAmelCase_ : List[str] = parent lowerCAmelCase_ : List[str] = batch_size lowerCAmelCase_ : Union[str, Any] = image_size lowerCAmelCase_ : str = num_channels lowerCAmelCase_ : Optional[Any] = kernel_size lowerCAmelCase_ : Optional[int] = stride lowerCAmelCase_ : List[Any] = 
padding lowerCAmelCase_ : Tuple = hidden_sizes lowerCAmelCase_ : Any = num_attention_heads lowerCAmelCase_ : Union[str, Any] = depths lowerCAmelCase_ : int = key_dim lowerCAmelCase_ : Union[str, Any] = drop_path_rate lowerCAmelCase_ : Optional[Any] = patch_size lowerCAmelCase_ : int = attention_ratio lowerCAmelCase_ : str = mlp_ratio lowerCAmelCase_ : Any = initializer_range lowerCAmelCase_ : Union[str, Any] = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] lowerCAmelCase_ : Dict = is_training lowerCAmelCase_ : Tuple = use_labels lowerCAmelCase_ : Any = num_labels lowerCAmelCase_ : Any = initializer_range def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ : Tuple = None if self.use_labels: lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def lowercase_ ( self ) -> Tuple: return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> int: lowerCAmelCase_ : Union[str, Any] = LevitModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowerCAmelCase_ : List[str] = model(lowerCAmelCase__ ) lowerCAmelCase_ : int = (self.image_size, self.image_size) lowerCAmelCase_ : Dict = image_size[0], image_size[1] for _ in range(4 ): lowerCAmelCase_ : int = floor(((height 
+ 2 * self.padding - self.kernel_size) / self.stride) + 1 ) lowerCAmelCase_ : Optional[int] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> int: lowerCAmelCase_ : Optional[int] = self.num_labels lowerCAmelCase_ : str = LevitForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() lowerCAmelCase_ : Optional[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase_ : Optional[Any] = config_and_inputs lowerCAmelCase_ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case__( a__, a__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : Dict = ( { """feature-extraction""": LevitModel, """image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = False SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : str = False def lowercase_ ( self ) -> Any: lowerCAmelCase_ : List[Any] = LevitModelTester(self ) lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 ) def lowercase_ ( self ) -> Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase_ ( self ) -> Tuple: return @unittest.skip(reason='''Levit does not use inputs_embeds''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''Levit does not support input and output embeddings''' ) def lowercase_ ( self ) -> Dict: pass @unittest.skip(reason='''Levit does not output attentions''' ) def lowercase_ ( self ) -> Any: pass def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[Any] = model_class(lowerCAmelCase__ ) lowerCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : Optional[int] = [*signature.parameters.keys()] lowerCAmelCase_ : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def lowercase_ ( self ) -> int: def check_hidden_states_output(__lowercase , __lowercase , __lowercase ): lowerCAmelCase_ : Optional[Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): lowerCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) lowerCAmelCase_ : Tuple = outputs.hidden_states lowerCAmelCase_ : Any = len(self.model_tester.depths ) + 1 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) lowerCAmelCase_ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size) lowerCAmelCase_ : Optional[int] = image_size[0], image_size[1] for _ in range(4 ): lowerCAmelCase_ : Optional[int] = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / 
self.model_tester.stride ) + 1 ) lowerCAmelCase_ : str = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase_ : List[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase_ ( self ) -> List[Any]: pass def lowercase_ ( self , __lowercase , __lowercase , __lowercase=False ) -> Union[str, Any]: lowerCAmelCase_ : Optional[int] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def lowercase_ ( self ) -> str: lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) def lowercase_ ( self ) -> Optional[Any]: if not self.model_tester.is_training: return lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ : List[Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in 
get_values(lowerCAmelCase__ ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue lowerCAmelCase_ : Any = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.train() lowerCAmelCase_ : int = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) lowerCAmelCase_ : Union[str, Any] = model(**lowerCAmelCase__ ).loss loss.backward() def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowerCAmelCase_ : Any = False lowerCAmelCase_ : str = True for model_class in self.all_model_classes: if model_class in get_values(lowerCAmelCase__ ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue lowerCAmelCase_ : Any = model_class(lowerCAmelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCAmelCase__ ) model.train() lowerCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) lowerCAmelCase_ : Any = model(**lowerCAmelCase__ ).loss loss.backward() def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ : str = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowerCAmelCase__ ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ): lowerCAmelCase_ : str = problem_type["title"] lowerCAmelCase_ : int = 
problem_type["num_labels"] lowerCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.train() lowerCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) if problem_type["num_labels"] > 1: lowerCAmelCase_ : Optional[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) lowerCAmelCase_ : Optional[Any] = inputs["labels"].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowerCAmelCase__ ) as warning_list: lowerCAmelCase_ : Optional[int] = model(**lowerCAmelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def lowercase_ ( self ) -> Dict: for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : List[str] = LevitModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def lowerCAmelCase ( )-> List[str]: lowerCAmelCase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Union[str, Any]: return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( 
lowerCAmelCase__ ) lowerCAmelCase_ : Optional[Any] = self.default_image_processor lowerCAmelCase_ : int = prepare_img() lowerCAmelCase_ : Any = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): lowerCAmelCase_ : Optional[Any] = model(**lowerCAmelCase__ ) # verify the logits lowerCAmelCase_ : Dict = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) lowerCAmelCase_ : Optional[Any] = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
709
_UpperCAmelCase : int =frozenset( [ """prompt""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", """cross_attention_kwargs""", ] ) _UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""]) _UpperCAmelCase : Dict =frozenset([]) _UpperCAmelCase : int =frozenset(["""image"""]) _UpperCAmelCase : Tuple =frozenset( [ """image""", """height""", """width""", """guidance_scale""", ] ) _UpperCAmelCase : int =frozenset(["""image"""]) _UpperCAmelCase : str =frozenset( [ """prompt""", """image""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", ] ) _UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""]) _UpperCAmelCase : Optional[int] =frozenset( [ # Text guided image variation with an image mask """prompt""", """image""", """mask_image""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", ] ) _UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""]) _UpperCAmelCase : Optional[Any] =frozenset( [ # image variation with an image mask """image""", """mask_image""", """height""", """width""", """guidance_scale""", ] ) _UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""]) _UpperCAmelCase : Union[str, Any] =frozenset( [ """example_image""", """image""", """mask_image""", """height""", """width""", """guidance_scale""", ] ) _UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""]) _UpperCAmelCase : Any =frozenset(["""class_labels"""]) _UpperCAmelCase : List[Any] =frozenset(["""class_labels"""]) _UpperCAmelCase : int =frozenset(["""batch_size"""]) _UpperCAmelCase : str =frozenset([]) _UpperCAmelCase : str =frozenset(["""batch_size"""]) _UpperCAmelCase : Optional[Any] =frozenset([]) _UpperCAmelCase : Tuple 
=frozenset( [ """prompt""", """audio_length_in_s""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", """cross_attention_kwargs""", ] ) _UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""]) _UpperCAmelCase : List[str] =frozenset(["""input_tokens"""]) _UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
619
0
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class snake_case__( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=3_2 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=4 , ) -> Union[str, Any]: lowerCAmelCase_ : List[str] = parent lowerCAmelCase_ : Any = batch_size lowerCAmelCase_ : List[str] = seq_length lowerCAmelCase_ : Tuple = is_training lowerCAmelCase_ : str = use_attention_mask lowerCAmelCase_ : Tuple = use_token_type_ids lowerCAmelCase_ : str = use_labels lowerCAmelCase_ : List[Any] = vocab_size lowerCAmelCase_ : List[Any] = hidden_size lowerCAmelCase_ : List[Any] = num_hidden_layers lowerCAmelCase_ : Optional[int] = num_attention_heads lowerCAmelCase_ : Union[str, Any] = intermediate_size lowerCAmelCase_ : str = hidden_act lowerCAmelCase_ : Dict = hidden_dropout_prob lowerCAmelCase_ : Dict = attention_probs_dropout_prob lowerCAmelCase_ : List[str] = max_position_embeddings lowerCAmelCase_ : Dict = type_vocab_size lowerCAmelCase_ : Optional[Any] = type_sequence_label_size lowerCAmelCase_ : List[str] = initializer_range lowerCAmelCase_ : Tuple = num_choices def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Any = ids_tensor([self.batch_size, 
self.seq_length] , self.vocab_size ) lowerCAmelCase_ : List[str] = None if self.use_attention_mask: lowerCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ : Dict = None if self.use_token_type_ids: lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase_ : Optional[int] = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Dict = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = config_and_inputs lowerCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = config_and_inputs lowerCAmelCase_ : Optional[int] = True lowerCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class snake_case__( UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = True 
SCREAMING_SNAKE_CASE__ : Tuple = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowercase_ ( self ) -> str: lowerCAmelCase_ : str = FlaxRobertaModelTester(self ) @slow def lowercase_ ( self ) -> Any: for model_class_name in self.all_model_classes: lowerCAmelCase_ : str = model_class_name.from_pretrained('''roberta-base''' , from_pt=__lowerCAmelCase ) lowerCAmelCase_ : str = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCAmelCase )
710
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int: lowerCAmelCase_ : Dict = 1 lowerCAmelCase_ : List[Any] = 1 lowerCAmelCase_ : Optional[Any] = {1: 1} for inputa in range(2 , lowerCAmelCase_ ): lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : Dict = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowerCAmelCase_ : Any = (3 * number) + 1 counter += 1 if inputa not in counters: lowerCAmelCase_ : Tuple = counter if counter > pre_counter: lowerCAmelCase_ : Optional[int] = inputa lowerCAmelCase_ : Union[str, Any] = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
619
0
from collections.abc import Sequence from queue import Queue class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None ) -> Dict: lowerCAmelCase_ : Optional[int] = start lowerCAmelCase_ : List[str] = end lowerCAmelCase_ : Optional[Any] = val lowerCAmelCase_ : Optional[Any] = (start + end) // 2 lowerCAmelCase_ : str = left lowerCAmelCase_ : str = right def __repr__( self ) -> str: return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase ) -> Tuple: lowerCAmelCase_ : str = collection lowerCAmelCase_ : Tuple = function if self.collection: lowerCAmelCase_ : Tuple = self._build_tree(0 , len(UpperCAmelCase_ ) - 1 ) def lowercase_ ( self , __lowercase , __lowercase ) -> Union[str, Any]: self._update_tree(self.root , UpperCAmelCase_ , UpperCAmelCase_ ) def lowercase_ ( self , __lowercase , __lowercase ) -> List[Any]: return self._query_range(self.root , UpperCAmelCase_ , UpperCAmelCase_ ) def lowercase_ ( self , __lowercase , __lowercase ) -> Tuple: if start == end: return SegmentTreeNode(UpperCAmelCase_ , UpperCAmelCase_ , self.collection[start] ) lowerCAmelCase_ : Optional[Any] = (start + end) // 2 lowerCAmelCase_ : int = self._build_tree(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase_ : Optional[Any] = self._build_tree(mid + 1 , UpperCAmelCase_ ) return SegmentTreeNode(UpperCAmelCase_ , UpperCAmelCase_ , self.fn(left.val , right.val ) , UpperCAmelCase_ , UpperCAmelCase_ ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> str: if node.start == i and node.end == i: lowerCAmelCase_ : List[str] = val return if i <= node.mid: self._update_tree(node.left , UpperCAmelCase_ , UpperCAmelCase_ ) else: self._update_tree(node.right , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase_ : List[str] = self.fn(node.left.val , node.right.val ) def lowercase_ ( self , 
__lowercase , __lowercase , __lowercase ) -> Dict: if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , UpperCAmelCase_ , UpperCAmelCase_ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , UpperCAmelCase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCAmelCase_ ) , ) else: # range in right child tree return self._query_range(node.right , UpperCAmelCase_ , UpperCAmelCase_ ) def lowercase_ ( self ) -> Any: if self.root is not None: lowerCAmelCase_ : Union[str, Any] = Queue() queue.put(self.root ) while not queue.empty(): lowerCAmelCase_ : Optional[Any] = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("""*""" * 50) _UpperCAmelCase : List[Any] =SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
711
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : str =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder""" SCREAMING_SNAKE_CASE__ : str = True def __init__( self , **__lowercase ) -> Union[str, Any]: super().__init__(**__lowercase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" lowerCAmelCase_ : str = kwargs.pop('''encoder''' ) lowerCAmelCase_ : int = encoder_config.pop('''model_type''' ) lowerCAmelCase_ : Optional[Any] = kwargs.pop('''decoder''' ) lowerCAmelCase_ : Optional[Any] = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(__lowercase , **__lowercase ) lowerCAmelCase_ : List[str] = AutoConfig.for_model(__lowercase , **__lowercase ) lowerCAmelCase_ : Any = True @classmethod def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> PretrainedConfig: logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowerCAmelCase_ : int = True lowerCAmelCase_ : List[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowercase ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ : List[str] = self.encoder.to_dict() lowerCAmelCase_ : Dict = self.decoder.to_dict() lowerCAmelCase_ : Optional[Any] = self.__class__.model_type return output
619
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class snake_case__( UpperCamelCase_ ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase ) -> Optional[int]: lowerCAmelCase_ : Union[str, Any] = dataset lowerCAmelCase_ : Optional[Any] = process lowerCAmelCase_ : Union[str, Any] = params def __len__( self ) -> Tuple: return len(self.dataset ) def __getitem__( self , __lowercase ) -> Union[str, Any]: lowerCAmelCase_ : List[Any] = self.dataset[i] lowerCAmelCase_ : Optional[int] = self.process(UpperCamelCase__ , **self.params ) return processed class snake_case__( UpperCamelCase_ ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase=None ) -> List[Any]: lowerCAmelCase_ : List[Any] = loader lowerCAmelCase_ : List[Any] = infer lowerCAmelCase_ : int = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCAmelCase_ : Optional[int] = None lowerCAmelCase_ : List[str] = loader_batch_size # Internal bookkeeping lowerCAmelCase_ : Optional[int] = None lowerCAmelCase_ : int = None def __len__( self ) -> Tuple: return len(self.loader ) def __iter__( self ) -> Optional[int]: lowerCAmelCase_ : Optional[int] = iter(self.loader ) return self def lowercase_ ( self ) -> List[str]: if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCAmelCase_ : Optional[Any] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCAmelCase_ : Union[str, Any] = {} for k, element in self._loader_batch_data.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # Convert ModelOutput to tuple first lowerCAmelCase_ : Dict = element.to_tuple() if isinstance(element[0] , torch.Tensor ): lowerCAmelCase_ : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in 
element ) elif isinstance(element[0] , np.ndarray ): lowerCAmelCase_ : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): lowerCAmelCase_ : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): lowerCAmelCase_ : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCAmelCase_ : int = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCAmelCase_ : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCAmelCase_ : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
lowerCAmelCase_ : Tuple = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCAmelCase_ : Any = self._loader_batch_data.__class__(UpperCamelCase__ ) self._loader_batch_index += 1 return result def lowercase_ ( self ) -> Tuple: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCAmelCase_ : Tuple = next(self.iterator ) lowerCAmelCase_ : List[Any] = self.infer(UpperCamelCase__ , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(UpperCamelCase__ , torch.Tensor ): lowerCAmelCase_ : Optional[int] = processed else: lowerCAmelCase_ : int = list(processed.keys() )[0] lowerCAmelCase_ : int = processed[key] if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCAmelCase_ : List[Any] = len(UpperCamelCase__ ) else: lowerCAmelCase_ : Dict = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
lowerCAmelCase_ : List[Any] = observed_batch_size # Setting internal index to unwrap the batch lowerCAmelCase_ : List[Any] = processed lowerCAmelCase_ : int = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class snake_case__( UpperCamelCase_ ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase=None ) -> Any: super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __iter__( self ) -> List[Any]: lowerCAmelCase_ : Tuple = iter(self.loader ) lowerCAmelCase_ : List[Any] = None return self def lowercase_ ( self ) -> Dict: if self.subiterator is None: lowerCAmelCase_ : Dict = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item lowerCAmelCase_ : Any = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCAmelCase_ : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) lowerCAmelCase_ : Union[str, Any] = next(self.subiterator ) return processed class snake_case__( UpperCamelCase_ ): '''simple docstring''' def __iter__( self ) -> Dict: lowerCAmelCase_ : Dict = iter(self.loader ) return self def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : List[Any] = False lowerCAmelCase_ : Optional[int] = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCAmelCase_ : Tuple = self.loader_batch_item() lowerCAmelCase_ : Any = item.pop('''is_last''' ) accumulator.append(UpperCamelCase__ ) if is_last: return accumulator while not is_last: lowerCAmelCase_ : Any = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(UpperCamelCase__ , torch.Tensor ): lowerCAmelCase_ : Tuple = processed else: lowerCAmelCase_ : Union[str, Any] = list(processed.keys() )[0] lowerCAmelCase_ : List[str] = processed[key] if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCAmelCase_ : List[Any] = len(UpperCamelCase__ ) else: lowerCAmelCase_ : int = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
lowerCAmelCase_ : List[str] = observed_batch_size lowerCAmelCase_ : List[Any] = processed lowerCAmelCase_ : str = 0 while self._loader_batch_index < self.loader_batch_size: lowerCAmelCase_ : Any = self.loader_batch_item() lowerCAmelCase_ : List[Any] = item.pop('''is_last''' ) accumulator.append(UpperCamelCase__ ) if is_last: return accumulator else: lowerCAmelCase_ : int = processed lowerCAmelCase_ : List[str] = item.pop('''is_last''' ) accumulator.append(UpperCamelCase__ ) return accumulator class snake_case__( UpperCamelCase_ ): '''simple docstring''' def __init__( self , __lowercase , __lowercase ) -> Optional[int]: lowerCAmelCase_ : Optional[Any] = dataset lowerCAmelCase_ : Dict = key def __len__( self ) -> int: return len(self.dataset ) def __getitem__( self , __lowercase ) -> Dict: return self.dataset[i][self.key] class snake_case__( UpperCamelCase_ ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase ) -> Optional[Any]: lowerCAmelCase_ : str = dataset lowerCAmelCase_ : List[str] = keya lowerCAmelCase_ : Tuple = keya def __len__( self ) -> Any: return len(self.dataset ) def __getitem__( self , __lowercase ) -> Dict: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
712
from __future__ import annotations

from math import pi


def lowerCAmelCase(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve the inductive-reactance relation X_L = 2*pi*f*L for the unknown quantity.

    Exactly one of the three arguments must be given as 0; that quantity is
    computed from the other two and returned as a single-entry dict keyed by
    its name.

    Args:
        inductance: inductance L in henries (pass 0 if unknown).
        frequency: frequency f in hertz (pass 0 if unknown).
        reactance: inductive reactance X_L in ohms (pass 0 if unknown).

    Returns:
        ``{"inductance": L}``, ``{"frequency": f}`` or ``{"reactance": X_L}``.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    # Fix: the original signature declared three parameters all named
    # `lowerCAmelCase_` (a SyntaxError — duplicate argument names) while the
    # body read `inductance`, `frequency` and `reactance`; the parameters are
    # renamed to match the names the body actually uses.
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        # X_L and f known -> L = X_L / (2*pi*f)
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        # X_L and L known -> f = X_L / (2*pi*L)
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        # f and L known -> X_L = 2*pi*f*L
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        # Unreachable given the count(0) guard above; kept for defensiveness.
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
# ---------------------------------------------------------------------------
# NOTE(review): This chunk matches the shape of the Diffusers
# `SpectrogramDiffusionPipeline` (notes/continuous encoders + T5-film decoder +
# DDPM denoising loop + optional MelGAN vocoder).  An obfuscation pass renamed
# locals to `lowerCAmelCase_` and call arguments to `_SCREAMING_SNAKE_CASE` —
# a name that is never defined anywhere in this file — and fused statements
# onto single physical lines, so the text below is not runnable as-is.
# NOTE(review): tuple-unpack targets were also clobbered (e.g.
# `lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = output_range`), which is
# additionally invalid syntax.  Restore identifiers and formatting from the
# upstream diffusers file before attempting any logic change.  Code is left
# byte-identical; only these review comments were added.
# ---------------------------------------------------------------------------
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder _UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name _UpperCAmelCase : Optional[Any] =256 class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = ["""melgan"""] def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> None: super().__init__() # From MELGAN lowerCAmelCase_ : Optional[Any] = math.log(1e-5 ) # Matches MelGAN training. lowerCAmelCase_ : List[str] = 4.0 # Largest value for most examples lowerCAmelCase_ : Union[str, Any] = 1_2_8 self.register_modules( notes_encoder=_SCREAMING_SNAKE_CASE , continuous_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , melgan=_SCREAMING_SNAKE_CASE , ) def lowercase_ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = output_range if clip: lowerCAmelCase_ : Any = torch.clip(_SCREAMING_SNAKE_CASE , self.min_value , self.max_value ) # Scale to [0, 1]. lowerCAmelCase_ : int = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. 
return zero_one * (max_out - min_out) + min_out def lowercase_ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = input_range lowerCAmelCase_ : Union[str, Any] = torch.clip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if clip else outputs # Scale to [0, 1]. lowerCAmelCase_ : Any = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]: lowerCAmelCase_ : List[str] = input_tokens > 0 lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.notes_encoder( encoder_input_tokens=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.continuous_encoder( encoder_inputs=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> str: lowerCAmelCase_ : Optional[int] = noise_time if not torch.is_tensor(_SCREAMING_SNAKE_CASE ): lowerCAmelCase_ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0: lowerCAmelCase_ : str = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowerCAmelCase_ : Optional[int] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowerCAmelCase_ : Dict = self.decoder( encodings_and_masks=_SCREAMING_SNAKE_CASE , decoder_input_tokens=_SCREAMING_SNAKE_CASE , decoder_noise_time=_SCREAMING_SNAKE_CASE ) return logits @torch.no_grad() def __call__( self , __lowercase , __lowercase = None , __lowercase = 1_0_0 , __lowercase = 
True , __lowercase = "numpy" , __lowercase = None , __lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(_SCREAMING_SNAKE_CASE )}.""" ) lowerCAmelCase_ : Any = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowerCAmelCase_ : Any = np.zeros([1, 0, self.n_dims] , np.floataa ) lowerCAmelCase_ : int = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) for i, encoder_input_tokens in enumerate(_SCREAMING_SNAKE_CASE ): if i == 0: lowerCAmelCase_ : Optional[int] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowerCAmelCase_ : int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowerCAmelCase_ : Optional[int] = ones lowerCAmelCase_ : Any = self.scale_features( _SCREAMING_SNAKE_CASE , output_range=[-1.0, 1.0] , clip=_SCREAMING_SNAKE_CASE ) lowerCAmelCase_ : Optional[Any] = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_SCREAMING_SNAKE_CASE , continuous_mask=_SCREAMING_SNAKE_CASE , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowerCAmelCase_ : Dict = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowerCAmelCase_ : Any = self.decode( encodings_and_masks=_SCREAMING_SNAKE_CASE , input_tokens=_SCREAMING_SNAKE_CASE , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowerCAmelCase_ : List[str] = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample lowerCAmelCase_ : Optional[int] = self.scale_to_features(_SCREAMING_SNAKE_CASE , input_range=[-1.0, 1.0] ) lowerCAmelCase_ : Any = mel[:1] lowerCAmelCase_ : int = mel.cpu().float().numpy() lowerCAmelCase_ : Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info('''Generated segment''' , _SCREAMING_SNAKE_CASE ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. 
Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": lowerCAmelCase_ : Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowerCAmelCase_ : str = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_SCREAMING_SNAKE_CASE )
713
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)

# Fix: the obfuscated original defined every factory below under the single
# name `lowerCAmelCase` (each redefinition shadowing the previous one) with
# duplicate parameter names (a SyntaxError), while `TYPE_TO_SCHEDULER_FUNCTION`
# and `get_scheduler` referenced the canonical names that were never defined.
# The names are restored from the module's own references.


class SchedulerType(Enum):
    """Scheduler names accepted by :func:`get_scheduler`."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Schedule with a constant learning rate, using the lr set in the optimizer."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant multiplier schedule.

    `step_rules` looks like ``"1:10,0.1:20,0.01:30,0.005"``: multiplier 1 until
    step 10, 0.1 until step 20, 0.01 until step 30, then 0.005 forever after.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    # The trailing entry (no ":") is the multiplier used after the last boundary.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the optimizer lr, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup, then cosine decay; `num_cycles` half-waves over the decay phase."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer lr down to `lr_end`.

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's initial lr.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build any supported scheduler from its name.

    Args:
        name: scheduler name (a `SchedulerType` or its string value).
        optimizer: the optimizer whose learning rate will be scheduled.
        step_rules: rule string, only used by the piecewise-constant schedule.
        num_warmup_steps: warmup length; required by all warmup schedules.
        num_training_steps: total steps; required by decaying schedules.
        num_cycles: cycle count for the hard-restarts cosine schedule.
        power: exponent for the polynomial-decay schedule.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
619
0
# ---------------------------------------------------------------------------
# NOTE(review): This chunk matches the shape of the transformers ESMFold test
# module (`EsmFoldModelTester`, the common `ModelTesterMixin` test class, and a
# slow integration test) after an obfuscation pass: the tester class was
# renamed to `snake_case__` and all locals to `lowerCAmelCase_`, yet the test
# class below still instantiates `EsmFoldModelTester(self)` — a name this text
# never defines — and statements were fused onto single physical lines, so it
# is not runnable as-is.  Code is left byte-identical; only these review
# comments were added.  Restore identifiers from the upstream test file before
# editing.
# ---------------------------------------------------------------------------
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=1_9 , __lowercase=3_2 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> Optional[Any]: lowerCAmelCase_ : Dict = parent lowerCAmelCase_ : Tuple = batch_size lowerCAmelCase_ : List[Any] = seq_length lowerCAmelCase_ : int = is_training lowerCAmelCase_ : Any = use_input_mask lowerCAmelCase_ : List[str] = use_token_type_ids lowerCAmelCase_ : List[Any] = use_labels lowerCAmelCase_ : Tuple = vocab_size lowerCAmelCase_ : Any = hidden_size lowerCAmelCase_ : Optional[int] = num_hidden_layers lowerCAmelCase_ : Optional[int] = num_attention_heads lowerCAmelCase_ : Optional[Any] = intermediate_size lowerCAmelCase_ : List[Any] = hidden_act lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase_ : List[Any] = max_position_embeddings lowerCAmelCase_ : Union[str, Any] = type_vocab_size lowerCAmelCase_ : Optional[int] = type_sequence_label_size lowerCAmelCase_ : Optional[Any] = initializer_range lowerCAmelCase_ : Any = num_labels lowerCAmelCase_ : Dict = num_choices lowerCAmelCase_ : Any = scope def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : Dict = 
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ : Optional[int] = None if self.use_input_mask: lowerCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ : Any = None lowerCAmelCase_ : Dict = None lowerCAmelCase_ : int = None if self.use_labels: lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : str = EsmConfig( vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__lowercase , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , ) return config def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[Any]: lowerCAmelCase_ : Union[str, Any] = EsmForProteinFolding(config=__lowercase ).float() model.to(__lowercase ) model.eval() lowerCAmelCase_ : Union[str, Any] = model(__lowercase , attention_mask=__lowercase ) lowerCAmelCase_ : str = model(__lowercase ) lowerCAmelCase_ : Optional[Any] = model(__lowercase ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 
7, 2) ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Dict = self.prepare_config_and_inputs() ( lowerCAmelCase_ ) : Any = config_and_inputs lowerCAmelCase_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : List[str] = (EsmForProteinFolding,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : List[str] = () SCREAMING_SNAKE_CASE__ : List[Any] = {} if is_torch_available() else {} SCREAMING_SNAKE_CASE__ : Tuple = False def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Tuple = EsmFoldModelTester(self ) lowerCAmelCase_ : Tuple = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 ) def lowercase_ ( self ) -> str: self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) @unittest.skip('''Does not support attention outputs''' ) def lowercase_ ( self ) -> Optional[int]: pass @unittest.skip def lowercase_ ( self ) -> List[Any]: pass @unittest.skip('''Esm does not support embedding resizing''' ) def lowercase_ ( self ) -> List[Any]: pass @unittest.skip('''Esm does not support embedding resizing''' ) def lowercase_ ( self ) -> int: pass @unittest.skip('''ESMFold does not support passing input embeds!''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self ) -> List[str]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self ) -> List[Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self ) -> Union[str, Any]: pass @unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self ) -> Dict: pass 
@unittest.skip('''ESMFold does not support head pruning.''' ) def lowercase_ ( self ) -> Union[str, Any]: pass @unittest.skip('''ESMFold does not output hidden states in the normal way.''' ) def lowercase_ ( self ) -> str: pass @unittest.skip('''ESMfold does not output hidden states in the normal way.''' ) def lowercase_ ( self ) -> List[str]: pass @unittest.skip('''ESMFold only has one output format.''' ) def lowercase_ ( self ) -> Optional[int]: pass @unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' ) def lowercase_ ( self ) -> Optional[int]: pass @unittest.skip('''ESMFold does not support input chunking.''' ) def lowercase_ ( self ) -> List[str]: pass @unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' ) def lowercase_ ( self ) -> Optional[int]: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def lowercase_ ( self ) -> Dict: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def lowercase_ ( self ) -> int: pass @unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' ) def lowercase_ ( self ) -> List[Any]: pass @unittest.skip('''ESMFold doesn\'t support data parallel.''' ) def lowercase_ ( self ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase_ ( self ) -> Optional[Any]: pass @require_torch class snake_case__( UpperCAmelCase__ ): '''simple docstring''' @slow def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : List[str] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float() model.eval() lowerCAmelCase_ : str = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) lowerCAmelCase_ : Optional[int] = model(__lowercase )["""positions"""] lowerCAmelCase_ : List[str] = torch.tensor([2.58_28, 0.79_93, -1_0.9_3_3_4] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 
0, 0, 0] , __lowercase , atol=1e-4 ) )
714
from __future__ import annotations


def lowerCAmelCase(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Solve the semiconductor mass-action law n * p = n_i**2 for the unknown quantity.

    Exactly one of the three concentrations must be given as 0; that quantity is
    computed from the other two and returned as ``(name, value)``.

    Args:
        electron_conc: electron concentration n (pass 0 if unknown).
        hole_conc: hole concentration p (pass 0 if unknown).
        intrinsic_conc: intrinsic concentration n_i (pass 0 if unknown).

    Returns:
        ``("electron_conc", n)``, ``("hole_conc", p)`` or ``("intrinsic_conc", n_i)``.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    # Fix: the original signature declared three parameters all named
    # `lowerCAmelCase_` (a SyntaxError — duplicate argument names) while the
    # body read `electron_conc`, `hole_conc` and `intrinsic_conc`; the
    # parameters are renamed to match the names the body actually uses.
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        # n = n_i**2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i**2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable given the count(0) guard above; kept for parity with the original.
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class snake_case__(PretrainedConfig):
    """Configuration for the GPTSAN-japanese switch-transformer model.

    Fixes applied to the obfuscated original: the `__init__` signature declared
    every parameter with the duplicate name `__lowercase` (a SyntaxError), the
    class attributes were all clobbered to `SCREAMING_SNAKE_CASE__` (each
    overwriting the previous), and the base class `lowercase__` was undefined.
    Parameter and attribute names are restored from the body's own
    `self.<name> = ...` assignments and from the `PretrainedConfig` contract
    (`model_type`, `keys_to_ignore_at_inference`, `attribute_map`); the base is
    the `PretrainedConfig` this file already imports.
    """

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth is the switch stack plus the extra (ext) stack.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
715
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
#
# Fix: the obfuscated original assigned every module-level constant and both
# functions to clobbered names (`_UpperCAmelCase`, `lowerCAmelCase`) even
# though the code itself referenced the real names (`PATH_TO_TRANSFORMERS`,
# `transformers`, `_re_checkpoint`, `get_checkpoint_from_config_class`,
# `check_config_docstrings_have_checkpoints`).  The names referenced by the
# code are restored.
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes whose docstrings are allowed to lack a checkpoint link.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link points at its own hub page, else None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every (non-deprecated, non-exempt) config without a valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _UpperCAmelCase : Union[str, Any] ={ """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Dict =["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : List[str] =["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : int =["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _UpperCAmelCase : str =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
716
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple: lowerCAmelCase_ : Optional[int] = scheduler lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers] lowerCAmelCase_ : str = split_batches lowerCAmelCase_ : Any = step_with_optimizer lowerCAmelCase_ : Optional[Any] = GradientState() def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any: if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*__lowercase , **__lowercase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*__lowercase , **__lowercase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes for _ in range(__lowercase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , '''total_steps''' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*__lowercase , **__lowercase ) else: self.scheduler.step(*__lowercase , **__lowercase ) def lowercase_ ( self ) -> Union[str, Any]: return self.scheduler.get_last_lr() def lowercase_ ( self ) -> List[str]: return self.scheduler.state_dict() def 
lowercase_ ( self , __lowercase ) -> int: self.scheduler.load_state_dict(__lowercase ) def lowercase_ ( self ) -> Tuple: return self.scheduler.get_lr() def lowercase_ ( self , *__lowercase , **__lowercase ) -> int: return self.scheduler.print_lr(*__lowercase , **__lowercase )
619
0
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=2 , __lowercase=3 , __lowercase=4 , __lowercase=2 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=3_6 , __lowercase=3 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=6 , __lowercase=6 , __lowercase=3 , __lowercase=4 , __lowercase=None , __lowercase=1_0_0_0 , ) -> Tuple: lowerCAmelCase_ : List[Any] = parent lowerCAmelCase_ : Optional[int] = batch_size lowerCAmelCase_ : Union[str, Any] = num_channels lowerCAmelCase_ : str = image_size lowerCAmelCase_ : int = patch_size lowerCAmelCase_ : List[Any] = text_seq_length lowerCAmelCase_ : str = is_training lowerCAmelCase_ : Any = use_input_mask lowerCAmelCase_ : int = use_token_type_ids lowerCAmelCase_ : str = use_labels lowerCAmelCase_ : 
Optional[Any] = vocab_size lowerCAmelCase_ : int = hidden_size lowerCAmelCase_ : List[str] = num_hidden_layers lowerCAmelCase_ : Any = num_attention_heads lowerCAmelCase_ : Optional[int] = intermediate_size lowerCAmelCase_ : int = hidden_act lowerCAmelCase_ : List[str] = hidden_dropout_prob lowerCAmelCase_ : List[str] = attention_probs_dropout_prob lowerCAmelCase_ : Union[str, Any] = max_position_embeddings lowerCAmelCase_ : Dict = type_vocab_size lowerCAmelCase_ : Union[str, Any] = type_sequence_label_size lowerCAmelCase_ : Union[str, Any] = initializer_range lowerCAmelCase_ : int = coordinate_size lowerCAmelCase_ : List[Any] = shape_size lowerCAmelCase_ : Union[str, Any] = num_labels lowerCAmelCase_ : str = num_choices lowerCAmelCase_ : List[Any] = scope lowerCAmelCase_ : int = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowerCAmelCase_ : Any = text_seq_length lowerCAmelCase_ : str = (image_size // patch_size) ** 2 + 1 lowerCAmelCase_ : Optional[Any] = self.text_seq_length + self.image_seq_length def lowercase_ ( self ) -> str: lowerCAmelCase_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase_ : Optional[int] = bbox[i, j, 3] lowerCAmelCase_ : List[Any] = bbox[i, j, 1] lowerCAmelCase_ : int = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase_ : int = bbox[i, j, 2] lowerCAmelCase_ : Optional[int] = bbox[i, j, 0] lowerCAmelCase_ : List[str] = t lowerCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ : Union[str, Any] = None if self.use_input_mask: lowerCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.text_seq_length] ) 
lowerCAmelCase_ : Union[str, Any] = None if self.use_token_type_ids: lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) lowerCAmelCase_ : Any = None lowerCAmelCase_ : int = None if self.use_labels: lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ : int = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) lowerCAmelCase_ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int: lowerCAmelCase_ : str = LayoutLMvaModel(config=__lowercase ) model.to(__lowercase ) model.eval() # text + image lowerCAmelCase_ : Optional[Any] = model(__lowercase , pixel_values=__lowercase ) lowerCAmelCase_ : Dict = model( __lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase ) lowerCAmelCase_ : Dict = model(__lowercase , bbox=__lowercase , pixel_values=__lowercase , token_type_ids=__lowercase ) lowerCAmelCase_ : List[str] = model(__lowercase , bbox=__lowercase , pixel_values=__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) # text only lowerCAmelCase_ : Optional[Any] = model(__lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only lowerCAmelCase_ : Tuple = model(pixel_values=__lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]: lowerCAmelCase_ : Dict = self.num_labels lowerCAmelCase_ : str = LayoutLMvaForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() lowerCAmelCase_ : List[Any] = model( __lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]: lowerCAmelCase_ : Optional[Any] = self.num_labels lowerCAmelCase_ : Union[str, Any] = LayoutLMvaForTokenClassification(config=__lowercase ) model.to(__lowercase ) model.eval() lowerCAmelCase_ : Tuple = model( __lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : List[Any] = LayoutLMvaForQuestionAnswering(config=__lowercase ) model.to(__lowercase ) model.eval() lowerCAmelCase_ : Optional[Any] = model( __lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , 
start_positions=__lowercase , end_positions=__lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) : Any = config_and_inputs lowerCAmelCase_ : Optional[int] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class snake_case__( UpperCamelCase__, UpperCamelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : List[Any] = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : Optional[int] = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : List[str] = ( {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel} if is_torch_available() else {} ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple: # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : Optional[int] = LayoutLMvaModelTester(self ) lowerCAmelCase_ : List[str] = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase=False ) -> str: lowerCAmelCase_ : Any = copy.deepcopy(__lowercase ) if model_class in get_values(__lowercase ): lowerCAmelCase_ : List[str] = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__lowercase , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__lowercase ): lowerCAmelCase_ : Dict = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) elif model_class in get_values(__lowercase ): lowerCAmelCase_ : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) lowerCAmelCase_ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) elif model_class in [ *get_values(__lowercase ), ]: lowerCAmelCase_ : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) elif model_class in [ *get_values(__lowercase ), ]: lowerCAmelCase_ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__lowercase , ) return inputs_dict def lowercase_ ( self ) -> List[str]: self.config_tester.run_common_tests() def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase_ : str = type 
self.model_tester.create_and_check_model(*__lowercase ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowercase ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowercase ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowercase ) @slow def lowercase_ ( self ) -> Any: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : Dict = LayoutLMvaModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowerCAmelCase ( ): lowerCAmelCase_ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Tuple: return LayoutLMvaImageProcessor(apply_ocr=__lowercase ) if is_vision_available() else None @slow def lowercase_ ( self ) -> int: lowerCAmelCase_ : str = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(__lowercase ) lowerCAmelCase_ : Any = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : Optional[int] = image_processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase ) lowerCAmelCase_ : List[Any] = torch.tensor([[1, 2]] ) lowerCAmelCase_ : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass lowerCAmelCase_ : Dict = model( input_ids=input_ids.to(__lowercase ) , bbox=bbox.to(__lowercase ) , pixel_values=pixel_values.to(__lowercase ) , ) # verify the logits lowerCAmelCase_ : Dict = torch.Size((1, 1_9_9, 7_6_8) ) self.assertEqual(outputs.last_hidden_state.shape , __lowercase ) 
lowerCAmelCase_ : Tuple = torch.tensor( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ) )
717
from manim import * class snake_case__( UpperCAmelCase__ ): '''simple docstring''' def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 ) lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )] lowerCAmelCase_ : int = [mem.copy() for i in range(6 )] lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 ) lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowercase ) lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )] lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 ) lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) gpu.move_to([-1, -1, 0] ) self.add(__lowercase ) lowerCAmelCase_ : str = [mem.copy() for i in range(6 )] lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 ) lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) model.move_to([3, -1.0, 0] ) self.add(__lowercase ) lowerCAmelCase_ : int = [] lowerCAmelCase_ : int = [] lowerCAmelCase_ : Dict = [] for i, rect in enumerate(__lowercase ): rect.set_stroke(__lowercase ) lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 ) if i == 0: 
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 ) self.add(__lowercase ) model_cpu_arr.append(__lowercase ) self.add(*__lowercase , *__lowercase , *__lowercase ) lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )] lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 ) lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__lowercase ) lowerCAmelCase_ : Optional[Any] = [] lowerCAmelCase_ : Dict = [] for i, rect in enumerate(__lowercase ): lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 ) target.move_to(__lowercase ) ckpt_arr.append(__lowercase ) lowerCAmelCase_ : Union[str, Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__lowercase ) self.add(*__lowercase , *__lowercase ) lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase_ : str = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowercase , __lowercase ) lowerCAmelCase_ : str = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , ) blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__lowercase ) lowerCAmelCase_ : str = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , ) step_a.move_to([2, 2, 
0] ) lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )] lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )] lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 ) lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) ) lowerCAmelCase_ : int = [] for i, rect in enumerate(__lowercase ): lowerCAmelCase_ : int = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__lowercase , run_time=1.5 ) ) self.play(*__lowercase ) self.play(FadeOut(__lowercase ) ) lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowercase , run_time=3 ) ) self.play( FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , ) self.wait()
619
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _UpperCAmelCase : Optional[Any] ={"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : List[str] =["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Any =[ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys _UpperCAmelCase : Optional[int] =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
718
_UpperCAmelCase : Dict =[ (1000, """M"""), (900, """CM"""), (500, """D"""), (400, """CD"""), (100, """C"""), (90, """XC"""), (50, """L"""), (40, """XL"""), (10, """X"""), (9, """IX"""), (5, """V"""), (4, """IV"""), (1, """I"""), ] def lowerCAmelCase ( lowerCAmelCase_ )-> int: lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000} lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : List[str] = 0 while place < len(lowerCAmelCase_ ): if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowerCAmelCase ( lowerCAmelCase_ )-> str: lowerCAmelCase_ : List[Any] = [] for arabic, roman in ROMAN: ((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ ) result.append(roman * factor ) if number == 0: break return "".join(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
619
0
from typing import List from .keymap import KEYMAP, get_character def lowerCAmelCase ( lowerCAmelCase_ )-> Dict: def decorator(lowerCAmelCase_ ): lowerCAmelCase_ : Optional[Any] = getattr(lowercase__ , '''handle_key''' , [] ) handle += [key] setattr(lowercase__ , '''handle_key''' , lowercase__ ) return func return decorator def lowerCAmelCase ( *lowerCAmelCase_ )-> Union[str, Any]: def decorator(lowerCAmelCase_ ): lowerCAmelCase_ : Optional[Any] = getattr(lowercase__ , '''handle_key''' , [] ) handle += keys setattr(lowercase__ , '''handle_key''' , lowercase__ ) return func return decorator class snake_case__( __a ): '''simple docstring''' def __new__( cls , __lowercase , __lowercase , __lowercase ) -> Optional[int]: lowerCAmelCase_ : str = super().__new__(cls , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if not hasattr(lowerCAmelCase_ , '''key_handler''' ): setattr(lowerCAmelCase_ , '''key_handler''' , {} ) setattr(lowerCAmelCase_ , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): lowerCAmelCase_ : str = getattr(lowerCAmelCase_ , '''handle_key''' , [] ) for key in handled_keys: lowerCAmelCase_ : Any = value return new_cls @staticmethod def lowercase_ ( cls ) -> Optional[int]: lowerCAmelCase_ : Dict = get_character() if char != KEYMAP["undefined"]: lowerCAmelCase_ : Dict = ord(lowerCAmelCase_ ) lowerCAmelCase_ : str = cls.key_handler.get(lowerCAmelCase_ ) if handler: lowerCAmelCase_ : int = char return handler(cls ) else: return None def lowerCAmelCase ( cls )-> Union[str, Any]: return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
719
import csv import tweepy # Twitter API credentials _UpperCAmelCase : int ="""""" _UpperCAmelCase : Optional[int] ="""""" _UpperCAmelCase : Dict ="""""" _UpperCAmelCase : str ="""""" def lowerCAmelCase ( lowerCAmelCase_ )-> None: # authorize twitter, initialize tweepy lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ ) auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ ) # initialize a list to hold all the tweepy Tweets lowerCAmelCase_ : Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # save the id of the oldest tweet less one lowerCAmelCase_ : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowerCAmelCase_ ) > 0: print(f"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates lowerCAmelCase_ : Optional[Any] = api.user_timeline( screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # update the id of the oldest tweet less one lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1 print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(lowerCAmelCase_ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
619
0
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class snake_case__: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = None def lowercase_ ( self ) -> int: lowerCAmelCase_ : int = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase_ : int = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __a ) def lowercase_ ( self ) -> int: lowerCAmelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase_ : str = os.path.join(__a , '''feat_extract.json''' ) feat_extract_first.to_json_file(__a ) lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class.from_json_file(__a ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase_ : Any = feat_extract_first.save_pretrained(__a )[0] check_json_file_has_correct_format(__a ) lowerCAmelCase_ : Optional[int] = self.feature_extraction_class.from_pretrained(__a ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : Tuple = self.feature_extraction_class() self.assertIsNotNone(__a )
720
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` (a non-negative int) is prime by trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    # Trial division up to sqrt(number) is sufficient.
    for divisor in range(2, int(round(sqrt(number))) + 1):
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return the primes from 2 up to ``n`` (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # Zero out every multiple of a surviving (non-zero) entry.
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Return the primes from 2 up to ``n`` (inclusive), via trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list (0/1/primes map to [number])."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returned of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    """Goldbach's conjecture: return two primes whose sum is the even ``number`` > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple (German: kgV) of two positive ints, via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1, taking the max multiplicity of shared factors
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2, picking up factors unique to number2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime, zero-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return the primes strictly between two primes ``p_number_1 < p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of ``n`` (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce a fraction to lowest terms; returns (numerator, denominator)."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for ``n`` >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """Return the n-th value of this (1-based, fib(0) == fib(1) == 1) Fibonacci variant."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
619
0
from __future__ import annotations from collections.abc import Iterator class snake_case__: '''simple docstring''' def __init__( self , __lowercase ) -> None: lowerCAmelCase_ : str = value lowerCAmelCase_ : List[str] = None lowerCAmelCase_ : List[str] = None class snake_case__: '''simple docstring''' def __init__( self , __lowercase ) -> None: lowerCAmelCase_ : List[str] = tree def lowercase_ ( self , __lowercase ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
721
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search for ``target`` in ``array[left:right]`` (right exclusive).

    Returns the index of ``target``, or -1 if it is absent.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search in a sorted ``array``; returns index or -1."""
    left = 0
    right = len(array) - 1  # fix: inclusive bound (was len(array))
    while left <= right:
        if right - left < precision:
            # Small range: linear scan over the inclusive range [left, right].
            return lin_search(left, right + 1, array, target)

        # Split [left, right] into three near-equal parts. The previous
        # (left + right) // 3 + 1 formula could fall outside [left, right].
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on ``array[left..right]`` (inclusive); index or -1."""
    if left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
619
0
import argparse
import os
import re

PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of ``line`` ('' for blank/unindented lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split ``code`` into blocks whose boundaries are lines at ``indent_level``.

    If ``start_prompt``/``end_prompt`` are given, everything before/after them
    is kept as single blocks at the ends of the returned list.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at indent_level ends the current block -- unless the block's
            # last line is deeper-indented, in which case this line closes it.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so that sorting ignores underscores and casing."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort ``objects`` with constants first, classes second, functions last."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return ``import_statement`` with the names inside its brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines
        # (one per name); we may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line.
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the ``_import_structure`` of a transformers ``__init__.py``.

    Returns True (without writing) when ``check_only`` and the file would change;
    otherwise rewrites the file in place.
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block
    # (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were
        # and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block_result = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block_result)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run ``sort_imports`` on every ``__init__.py`` under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
700
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Base import structure: the configuration is always importable.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

# Slow tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

# Fast tokenizer requires the tokenizers library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
0
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available

if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    """Tests for the `text-to-speech` tool (plus the generic ToolTesterMixin checks)."""

    def setUp(self):
        # Load and prepare the tool once per test.
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # A fixed seed makes the generated waveform deterministic, so the
        # first samples can be compared against reference values.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
701
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


# NOTE(review): `TransformeraDModel` is presumably the mangled spelling of
# diffusers' `Transformer2DModel` — confirm against the original file.
_UpperCAmelCase : Any = False


class snake_case__( unittest.TestCase ):
    '''Fast CPU tests for the VQ-Diffusion pipeline with tiny dummy components.'''

    # NOTE(review): every property/method below is named `lowercase_`
    # (mangling artifact) and so shadows the previous one; the original gave
    # each a distinct name (tearDown, num_embed, num_embeds_ada_norm,
    # text_embedder_hidden_size, dummy_vqvae, dummy_tokenizer,
    # dummy_text_encoder, dummy_transformer, ...), which the bodies still
    # reference through `self.` — confirm against the original source.

    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        # presumably num_embed: size of the VQ codebook — confirm
        return 1_2

    @property
    def lowercase_ ( self ) -> Any:
        # presumably num_embeds_ada_norm — confirm
        return 1_2

    @property
    def lowercase_ ( self ) -> Optional[Any]:
        # presumably text_embedder_hidden_size — confirm
        return 3_2

    @property
    def lowercase_ ( self ) -> int:
        # Tiny VQModel built with a fixed seed for reproducibility.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Any = VQModel(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def lowercase_ ( self ) -> Dict:
        # Tiny CLIP tokenizer pulled from the HF hub test namespace.
        lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def lowercase_ ( self ) -> int:
        # Tiny CLIP text encoder with a fixed seed.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(__lowercase )

    @property
    def lowercase_ ( self ) -> List[str]:
        # Tiny transformer over a 12x12 latent grid (height/width below).
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = 1_2
        lowerCAmelCase_ : int = 1_2
        lowerCAmelCase_ : Union[str, Any] = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 3_2,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 3_2,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
        return model

    def lowercase_ ( self ) -> str:
        # End-to-end smoke test on CPU with an *untrained* (empty) learned
        # classifier-free-guidance embedding; checks output shape, a 3x3
        # corner slice against reference values, and dict vs tuple parity.
        lowerCAmelCase_ : List[Any] = '''cpu'''
        lowerCAmelCase_ : Any = self.dummy_vqvae
        lowerCAmelCase_ : str = self.dummy_text_encoder
        lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
        lowerCAmelCase_ : int = self.dummy_transformer
        lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
        lowerCAmelCase_ : Dict = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : int = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        # Same seed for both calls so the dict and tuple outputs must match.
        lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : Union[str, Any] = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : List[Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self ) -> List[str]:
        # Same smoke test but with a *learnable* CFG embedding sized to the
        # text encoder / tokenizer; note the looser 2.0 tolerance on the first
        # slice comparison (untrained weights give high variance).
        lowerCAmelCase_ : Optional[Any] = '''cpu'''
        lowerCAmelCase_ : str = self.dummy_vqvae
        lowerCAmelCase_ : Dict = self.dummy_text_encoder
        lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
        lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
        lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : str = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    '''Slow GPU integration test against a reference image hosted on the hub.'''

    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self ) -> int:
        # Run the full microsoft/vq-diffusion-ithq checkpoint and compare the
        # generated 256x256 image to a stored reference within tolerance 2.0.
        lowerCAmelCase_ : Tuple = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Optional[int] = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
        lowerCAmelCase_ : Union[str, Any] = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image ).max() < 2.0
619
0
import argparse
import os
import re

import packaging.version


# NOTE(review): the four module constants below all bind the same mangled name
# `_UpperCAmelCase`, yet later code reads PATH_TO_EXAMPLES / REPLACE_PATTERNS /
# REPLACE_FILES / README_FILE — mangling artifact; confirm original names.
_UpperCAmelCase : Optional[int] ="""examples/"""
# pattern-name -> (compiled regex, replacement template with VERSION placeholder)
_UpperCAmelCase : Any ={
    """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
    """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
    """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
    """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
# pattern-name -> file holding the version string for that pattern
_UpperCAmelCase : str ={
    """init""": """src/transformers/__init__.py""",
    """setup""": """setup.py""",
}
_UpperCAmelCase : Union[str, Any] ="""README.md"""


# NOTE(review): every function below is named `lowerCAmelCase` with duplicated
# `lowerCAmelCase_` parameters (mangling artifact — not valid Python as-is);
# bodies still reference the original names (fname, version, pattern, patch).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Rewrite the version string in one file using the named regex pattern."""
    with open(lowercase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lowerCAmelCase_ : List[str] = f.read()
    lowerCAmelCase_ , lowerCAmelCase_ : str = REPLACE_PATTERNS[pattern]
    lowerCAmelCase_ : List[Any] = replace.replace('''VERSION''' , lowercase__ )
    lowerCAmelCase_ : Optional[int] = re_pattern.sub(lowercase__ , lowercase__ )
    with open(lowercase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(lowercase__ )


def lowerCAmelCase ( lowerCAmelCase_ ):
    """Walk the examples tree and update the pinned version in every .py file."""
    for folder, directories, fnames in os.walk(lowercase__ ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(lowercase__ , lowercase__ ) , lowercase__ , pattern='''examples''' )


def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ):
    """Update the version everywhere; patch releases skip the examples tree."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(lowercase__ , lowercase__ , lowercase__ )
    if not patch:
        update_version_in_examples(lowercase__ )


def lowerCAmelCase ( ):
    """Point model-doc links in the README at the stable (non-main) docs."""
    lowerCAmelCase_ : Optional[int] = '''🤗 Transformers currently provides the following architectures'''
    lowerCAmelCase_ : int = '''1. Want to contribute a new model?'''
    with open(lowercase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lowerCAmelCase_ : Tuple = f.readlines()
    # Find the start of the list.
    lowerCAmelCase_ : List[Any] = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    lowerCAmelCase_ : Optional[Any] = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lowerCAmelCase_ : Optional[Any] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' ,
                '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(lowercase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lowercase__ )


def lowerCAmelCase ( ):
    """Read the current version out of the package __init__ and parse it."""
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        lowerCAmelCase_ : Any = f.read()
    lowerCAmelCase_ : List[Any] = REPLACE_PATTERNS['''init'''][0].search(lowercase__ ).groups()[0]
    return packaging.version.parse(lowercase__ )


def lowerCAmelCase ( lowerCAmelCase_=False ):
    """Interactive pre-release step: pick, confirm, and write the new version."""
    lowerCAmelCase_ : Optional[int] = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        lowerCAmelCase_ : int = default_version.base_version
    elif patch:
        lowerCAmelCase_ : List[str] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        lowerCAmelCase_ : Optional[Any] = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    lowerCAmelCase_ : Optional[int] = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(lowercase__ ) == 0:
        lowerCAmelCase_ : List[Any] = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(lowercase__ , patch=lowercase__ )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()


def lowerCAmelCase ( ):
    """Interactive post-release step: bump to the next minor .dev0 version."""
    lowerCAmelCase_ : List[str] = get_version()
    lowerCAmelCase_ : str = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    lowerCAmelCase_ : Union[str, Any] = current_version.base_version
    # Check with the user we got that right.
    lowerCAmelCase_ : int = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(lowercase__ ) == 0:
        lowerCAmelCase_ : List[str] = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(lowercase__ )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    _UpperCAmelCase : Dict =argparse.ArgumentParser()
    parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
    parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    _UpperCAmelCase : int =parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("""Nothing to do after a patch :-)""")
    else:
        post_release_work()
702
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


# Fall back to None when sentencepiece is absent; the fast tokenizer then
# cannot save a slow-tokenizer vocabulary (see can_save_slow_tokenizer below).
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    _UpperCAmelCase : Dict =None

_UpperCAmelCase : Tuple =logging.get_logger(__name__)
# NOTE(review): the constants below all bind the same mangled name while the
# class body reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — mangling artifact; confirm upstream.
_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Any ={
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}
_UpperCAmelCase : Dict ={
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
_UpperCAmelCase : Tuple ="""▁"""

# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4


class snake_case__( UpperCAmelCase__ ):
    '''Fast (tokenizers-backed) XLNet tokenizer; pads on the left.'''
    # NOTE(review): base `UpperCAmelCase__` presumably stands for
    # PreTrainedTokenizerFast imported above — confirm upstream.

    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        # XLNet uses segment id 2 for the padding / CLS segment.
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        # Saving a slow vocab needs the original sentencepiece model file.
        lowerCAmelCase_ : str = False if not self.vocab_file else True

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        # build_inputs_with_special_tokens: XLNet format is
        # `A <sep> <cls>` or `A <sep> B <sep> <cls>` (special tokens at the end).
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        # create_token_type_ids_from_sequences: segment 0 for sequence A,
        # segment 1 for sequence B, segment 2 for the trailing CLS token.
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        # save_vocabulary: copy the sentencepiece model file into save_directory.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # Skip the copy when source and destination are the same file.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
619
0
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
# fairseq state-dict key prefix -> HF Hubert attribute path ("*" = layer index).
_UpperCAmelCase : Optional[Any] ={
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}


# NOTE(review): the functions below are all named `lowerCAmelCase` with
# duplicated `lowerCAmelCase_` parameters (mangling artifact — not valid
# Python as-is); bodies reference the original names (key, value, full_name,
# weight_type, hf_pointer, fairseq_model, hf_model, is_finetuned, ...).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
    """set_recursively: walk `key` dots into the HF model and copy `value` in,
    asserting the shapes match before assignment."""
    for attribute in key.split('''.''' ):
        lowerCAmelCase_ : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
    if weight_type is not None:
        lowerCAmelCase_ : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
    else:
        lowerCAmelCase_ : int = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    # Dispatch on the fairseq parametrization (weight-norm g/v split, bias).
    if weight_type == "weight":
        lowerCAmelCase_ : List[str] = value
    elif weight_type == "weight_g":
        lowerCAmelCase_ : Tuple = value
    elif weight_type == "weight_v":
        lowerCAmelCase_ : str = value
    elif weight_type == "bias":
        lowerCAmelCase_ : Optional[int] = value
    else:
        lowerCAmelCase_ : str = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
    """recursively_load_weights: port every fairseq tensor into the HF model,
    logging any weights that matched no mapping entry."""
    lowerCAmelCase_ : int = []
    lowerCAmelCase_ : str = fairseq_model.state_dict()
    # Fine-tuned checkpoints wrap the encoder under `hubert.`.
    lowerCAmelCase_ : Tuple = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        lowerCAmelCase_ : int = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
            lowerCAmelCase_ : int = True
        else:
            for key, mapped_key in MAPPING.items():
                lowerCAmelCase_ : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
                    lowerCAmelCase_ : Any = True
                    if "*" in mapped_key:
                        # Second-to-last dotted component is the layer index.
                        lowerCAmelCase_ : List[Any] = name.split(lowerCAmelCase_ )[0].split('''.''' )[-2]
                        lowerCAmelCase_ : str = mapped_key.replace('''*''' , lowerCAmelCase_ )
                    if "weight_g" in name:
                        lowerCAmelCase_ : Tuple = '''weight_g'''
                    elif "weight_v" in name:
                        lowerCAmelCase_ : Tuple = '''weight_v'''
                    elif "weight" in name:
                        lowerCAmelCase_ : str = '''weight'''
                    elif "bias" in name:
                        lowerCAmelCase_ : List[str] = '''bias'''
                    else:
                        lowerCAmelCase_ : Union[str, Any] = None
                    set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                continue
        if not is_used:
            unused_weights.append(lowerCAmelCase_ )
    logger.warning(f"""Unused weights: {unused_weights}""" )


def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
    """load_conv_layer: copy one feature-extractor conv/layer-norm tensor,
    keyed on the `conv_layers.<layer>.<sublayer>` suffix of the fairseq name."""
    lowerCAmelCase_ : int = full_name.split('''conv_layers.''' )[-1]
    lowerCAmelCase_ : Any = name.split('''.''' )
    lowerCAmelCase_ : str = int(items[0] )
    lowerCAmelCase_ : Dict = int(items[1] )
    # type_id 0 = conv weight/bias, type_id 2 = layer norm (group-norm only
    # present on layer 0 when use_group_norm is set).
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            lowerCAmelCase_ : Any = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            lowerCAmelCase_ : Tuple = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            lowerCAmelCase_ : List[str] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            lowerCAmelCase_ : List[str] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(lowerCAmelCase_ )


@torch.no_grad()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True )-> Any:
    """convert_hubert_checkpoint: load the fairseq checkpoint, optionally build
    a CTC tokenizer/processor from the fairseq dictionary (fine-tuned case),
    port the weights, and save the HF model to the dump folder."""
    if config_path is not None:
        lowerCAmelCase_ : List[Any] = HubertConfig.from_pretrained(lowerCAmelCase_ )
    else:
        lowerCAmelCase_ : Dict = HubertConfig()
    if is_finetuned:
        if dict_path:
            lowerCAmelCase_ : Dict = Dictionary.load(lowerCAmelCase_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            lowerCAmelCase_ : int = target_dict.pad_index
            lowerCAmelCase_ : List[str] = target_dict.bos_index
            lowerCAmelCase_ : Union[str, Any] = target_dict.eos_index
            lowerCAmelCase_ : Tuple = len(target_dict.symbols )
            lowerCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , '''vocab.json''' )
            if not os.path.isdir(lowerCAmelCase_ ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCAmelCase_ ) )
                return
            os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
            with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , lowerCAmelCase_ )
            lowerCAmelCase_ : Any = WavaVecaCTCTokenizer(
                lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowerCAmelCase_ , )
            # Feature extractor normalization follows the config's norm style.
            lowerCAmelCase_ : Dict = True if config.feat_extract_norm == '''layer''' else False
            lowerCAmelCase_ : str = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
            lowerCAmelCase_ : int = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
            processor.save_pretrained(lowerCAmelCase_ )
        lowerCAmelCase_ : List[str] = HubertForCTC(lowerCAmelCase_ )
    else:
        lowerCAmelCase_ : List[Any] = HubertModel(lowerCAmelCase_ )
    if is_finetuned:
        # Fine-tuned checkpoints need the dictionary directory as `data` override.
        lowerCAmelCase_ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        lowerCAmelCase_ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    lowerCAmelCase_ : Union[str, Any] = model[0].eval()
    recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    hf_wavavec.save_pretrained(lowerCAmelCase_ )


if __name__ == "__main__":
    _UpperCAmelCase : str =argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    _UpperCAmelCase : List[str] =parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
703
import math

import qiskit


def lowerCAmelCase ( input_a = 1 , input_b = 1 , carry_in = 1 ) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder on two input bits plus a carry-in.

    Each input may be 0, 1, or 2 — the value 2 puts that qubit into
    superposition via a Hadamard gate.  Returns the measurement counts over
    the sum and carry-out qubits after 1000 simulator shots.

    Raises:
        TypeError: if any input is not numeric (e.g. a string).
        ValueError: if any input is negative, non-integral, or greater than 2.
    """
    # BUG FIX: the previous check called isinstance(x, x) — passing the value
    # as its own classinfo — which itself raises TypeError for any int input.
    # Reject non-numeric (string) inputs instead, as the checks below require
    # numbers.  (The mangled signature also repeated one parameter name, which
    # is a SyntaxError; restored as input_a / input_b / carry_in.)
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )

    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )

    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )

    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )

    # build registers: 4 qubits (two inputs, carry-in, ancilla), 2 classical
    # bits for the measured sum and carry-out.
    lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' )
    lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    lowerCAmelCase_ : Any = [input_a, input_b, carry_in]
    lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )

    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(lowerCAmelCase_ )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(lowerCAmelCase_ )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(lowerCAmelCase_ )  # for 0 entries

    # build the circuit: Toffoli gates accumulate the carry into qubit 3,
    # CNOTs ripple the sum into qubit 2.
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , lowerCAmelCase_ )  # measure the last two qbits

    lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
    lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 )
    return job.result().get_counts(lowerCAmelCase_ )


if __name__ == "__main__":
    # BUG FIX: previously called the undefined name `quantum_full_adder`.
    print(f"""Total sum count for state is: {lowerCAmelCase(1, 1, 1)}""")
619
0
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class snake_case__( _UpperCAmelCase, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def lowercase_ ( self , __lowercase=0 ) -> List[str]: lowerCAmelCase_ : List[str] = np.random.RandomState(lowercase__ ) lowerCAmelCase_ : List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def lowercase_ ( self ) -> int: lowerCAmelCase_ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ : Dict = pipe(**lowercase__ ).images lowerCAmelCase_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) lowerCAmelCase_ : Union[str, Any] = np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase_ : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Optional[Any] = 
self.get_dummy_inputs() lowerCAmelCase_ : str = pipe(**lowercase__ ).images lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) lowerCAmelCase_ : Tuple = np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase_ : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : int = self.get_dummy_inputs() lowerCAmelCase_ : Any = pipe(**lowercase__ ).images lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) lowerCAmelCase_ : Dict = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase_ : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ : Optional[Any] = pipe(**lowercase__ ).images lowerCAmelCase_ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) lowerCAmelCase_ : Tuple = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> Any: lowerCAmelCase_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase_ : 
Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs() lowerCAmelCase_ : Optional[Any] = pipe(**lowercase__ ).images lowerCAmelCase_ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) lowerCAmelCase_ : str = np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : int = self.get_dummy_inputs() lowerCAmelCase_ : Optional[int] = pipe(**lowercase__ ).images lowerCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) lowerCAmelCase_ : Tuple = np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Tuple = self.get_dummy_inputs() lowerCAmelCase_ : List[Any] = 3 * [inputs["""prompt"""]] # forward lowerCAmelCase_ : Optional[Any] = pipe(**lowercase__ ) lowerCAmelCase_ : Optional[Any] = output.images[0, -3:, -3:, -1] lowerCAmelCase_ : Optional[int] = self.get_dummy_inputs() lowerCAmelCase_ : Optional[int] = 3 * [inputs.pop('''prompt''' )] lowerCAmelCase_ : str = pipe.tokenizer( lowercase__ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=lowercase__ , 
return_tensors='''np''' , ) lowerCAmelCase_ : Any = text_inputs["""input_ids"""] lowerCAmelCase_ : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] lowerCAmelCase_ : Optional[int] = prompt_embeds # forward lowerCAmelCase_ : Optional[int] = pipe(**lowercase__ ) lowerCAmelCase_ : int = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Tuple = self.get_dummy_inputs() lowerCAmelCase_ : List[Any] = 3 * ["""this is a negative prompt"""] lowerCAmelCase_ : Any = negative_prompt lowerCAmelCase_ : Dict = 3 * [inputs["""prompt"""]] # forward lowerCAmelCase_ : Optional[int] = pipe(**lowercase__ ) lowerCAmelCase_ : List[Any] = output.images[0, -3:, -3:, -1] lowerCAmelCase_ : Optional[int] = self.get_dummy_inputs() lowerCAmelCase_ : List[Any] = 3 * [inputs.pop('''prompt''' )] lowerCAmelCase_ : List[Any] = [] for p in [prompt, negative_prompt]: lowerCAmelCase_ : str = pipe.tokenizer( lowercase__ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=lowercase__ , return_tensors='''np''' , ) lowerCAmelCase_ : Any = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) lowerCAmelCase_ : str = embeds # forward lowerCAmelCase_ : str = pipe(**lowercase__ ) lowerCAmelCase_ : int = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class snake_case__( unittest.TestCase ): '''simple docstring''' @property def lowercase_ ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def 
lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : str = ort.SessionOptions() lowerCAmelCase_ : Any = False return options def lowercase_ ( self ) -> Union[str, Any]: # using the PNDM scheduler by default lowerCAmelCase_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Any = """A painting of a squirrel eating a burger""" np.random.seed(0 ) lowerCAmelCase_ : Dict = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type='''np''' ) lowerCAmelCase_ : List[Any] = output.images lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase_ : Dict = np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ : str = DDIMScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) lowerCAmelCase_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : List[Any] = """open neural network exchange""" lowerCAmelCase_ : str = np.random.RandomState(0 ) lowerCAmelCase_ : Dict = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowercase__ , output_type='''np''' ) lowerCAmelCase_ : int = output.images lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase_ : str = 
np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) lowerCAmelCase_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Dict = """open neural network exchange""" lowerCAmelCase_ : Any = np.random.RandomState(0 ) lowerCAmelCase_ : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowercase__ , output_type='''np''' ) lowerCAmelCase_ : Union[str, Any] = output.images lowerCAmelCase_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase_ : Optional[int] = np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self ) -> str: lowerCAmelCase_ : str = 0 def test_callback_fn(__lowercase , __lowercase , __lowercase ) -> None: lowerCAmelCase_ : Optional[int] = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 6_4, 6_4) lowerCAmelCase_ : str = latents[0, -3:, -3:, -1] lowerCAmelCase_ : str = np.array( [-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 6_4, 6_4) lowerCAmelCase_ : Union[str, Any] = latents[0, -3:, -3:, -1] lowerCAmelCase_ : Optional[Any] = np.array( [-0.33_51, 0.22_41, -0.18_37, -0.23_25, 
-0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 lowerCAmelCase_ : Union[str, Any] = False lowerCAmelCase_ : Any = OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase__ ) lowerCAmelCase_ : Optional[int] = """Andromeda galaxy in a bottle""" lowerCAmelCase_ : Union[str, Any] = np.random.RandomState(0 ) pipe( prompt=lowercase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowercase__ , callback=lowercase__ , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(lowercase__ , lowercase__ ) assert pipe.safety_checker is None lowerCAmelCase_ : int = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase__ ) lowerCAmelCase_ : int = OnnxStableDiffusionPipeline.from_pretrained(lowercase__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCAmelCase_ : Optional[Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None
704
import re def lowerCAmelCase ( lowerCAmelCase_ )-> bool: lowerCAmelCase_ : Tuple = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' ) if match := re.search(lowerCAmelCase_ , lowerCAmelCase_ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
619
0
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class snake_case__( __SCREAMING_SNAKE_CASE ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = (EulerDiscreteScheduler,) SCREAMING_SNAKE_CASE__ : List[Any] = 10 def lowercase_ ( self , **__lowercase ) -> List[Any]: lowerCAmelCase_ : List[Any] = { '''num_train_timesteps''': 1_1_0_0, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**__snake_case ) return config def lowercase_ ( self ) -> Union[str, Any]: for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__snake_case ) def lowercase_ ( self ) -> List[str]: for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__snake_case , beta_end=__snake_case ) def lowercase_ ( self ) -> Optional[int]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__snake_case ) def lowercase_ ( self ) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__snake_case ) def lowercase_ ( self ) -> int: lowerCAmelCase_ : int = self.scheduler_classes[0] lowerCAmelCase_ : List[Any] = self.get_scheduler_config() lowerCAmelCase_ : List[str] = scheduler_class(**__snake_case ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase_ : Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase_ : List[str] = self.dummy_model() lowerCAmelCase_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase_ : List[str] = sample.to(__snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase_ : Optional[int] = scheduler.scale_model_input(__snake_case , __snake_case ) lowerCAmelCase_ : Dict = model(__snake_case , __snake_case ) lowerCAmelCase_ : List[str] = scheduler.step(__snake_case , __snake_case , __snake_case , 
generator=__snake_case ) lowerCAmelCase_ : Any = output.prev_sample lowerCAmelCase_ : Optional[int] = torch.sum(torch.abs(__snake_case ) ) lowerCAmelCase_ : Tuple = torch.mean(torch.abs(__snake_case ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.01_31 ) < 1e-3 def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Optional[Any] = self.scheduler_classes[0] lowerCAmelCase_ : List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase_ : Dict = scheduler_class(**__snake_case ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase_ : Optional[int] = torch.manual_seed(0 ) lowerCAmelCase_ : Union[str, Any] = self.dummy_model() lowerCAmelCase_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase_ : int = sample.to(__snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase_ : int = scheduler.scale_model_input(__snake_case , __snake_case ) lowerCAmelCase_ : List[str] = model(__snake_case , __snake_case ) lowerCAmelCase_ : Optional[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ) lowerCAmelCase_ : str = output.prev_sample lowerCAmelCase_ : List[Any] = torch.sum(torch.abs(__snake_case ) ) lowerCAmelCase_ : str = torch.mean(torch.abs(__snake_case ) ) assert abs(result_sum.item() - 0.00_02 ) < 1e-2 assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3 def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Optional[int] = self.scheduler_classes[0] lowerCAmelCase_ : Any = self.get_scheduler_config() lowerCAmelCase_ : List[str] = scheduler_class(**__snake_case ) scheduler.set_timesteps(self.num_inference_steps , device=__snake_case ) lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(0 ) lowerCAmelCase_ : str = self.dummy_model() lowerCAmelCase_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() lowerCAmelCase_ : List[Any] = sample.to(__snake_case ) for t in scheduler.timesteps: 
lowerCAmelCase_ : Union[str, Any] = scheduler.scale_model_input(__snake_case , __snake_case ) lowerCAmelCase_ : int = model(__snake_case , __snake_case ) lowerCAmelCase_ : Any = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ) lowerCAmelCase_ : Tuple = output.prev_sample lowerCAmelCase_ : List[str] = torch.sum(torch.abs(__snake_case ) ) lowerCAmelCase_ : Tuple = torch.mean(torch.abs(__snake_case ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.01_31 ) < 1e-3 def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase_ : Tuple = self.get_scheduler_config() lowerCAmelCase_ : List[str] = scheduler_class(**__snake_case , use_karras_sigmas=__snake_case ) scheduler.set_timesteps(self.num_inference_steps , device=__snake_case ) lowerCAmelCase_ : int = torch.manual_seed(0 ) lowerCAmelCase_ : List[str] = self.dummy_model() lowerCAmelCase_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() lowerCAmelCase_ : Optional[int] = sample.to(__snake_case ) for t in scheduler.timesteps: lowerCAmelCase_ : str = scheduler.scale_model_input(__snake_case , __snake_case ) lowerCAmelCase_ : Optional[int] = model(__snake_case , __snake_case ) lowerCAmelCase_ : int = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case ) lowerCAmelCase_ : Optional[Any] = output.prev_sample lowerCAmelCase_ : str = torch.sum(torch.abs(__snake_case ) ) lowerCAmelCase_ : List[str] = torch.mean(torch.abs(__snake_case ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2 assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1e-3
705
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCAmelCase : Any =logging.get_logger(__name__) class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""] def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None: super().__init__(**__lowercase ) lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4} lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : List[Any] = do_resize lowerCAmelCase_ : Optional[int] = size # Default value set here for backwards compatibility where the value in config is None lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 lowerCAmelCase_ : Tuple = resample lowerCAmelCase_ : Optional[int] = do_rescale lowerCAmelCase_ : Any = rescale_factor lowerCAmelCase_ : List[str] = do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray: lowerCAmelCase_ : Optional[Any] = 
get_size_dict(__lowercase , default_to_square=__lowercase ) if "shortest_edge" not in size: raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" ) lowerCAmelCase_ : Optional[int] = size['''shortest_edge'''] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct ) lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any: return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray: return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image: lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase_ : str = resample if resample is not None 
else self.resample lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std lowerCAmelCase_ : int = size if size is not None else self.size lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase ) lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images] if do_resize: lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images] if do_rescale: lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images] if do_normalize: lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images] lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] lowerCAmelCase_ : Dict = {'''pixel_values''': images} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
619
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase =logging.get_logger(__name__) _UpperCAmelCase ={ """microsoft/beit-base-patch16-224-pt22k""": ( """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json""" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = "beit" def __init__( self , __lowercase=8_1_9_2 , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=2_2_4 , __lowercase=1_6 , __lowercase=3 , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=0.1 , __lowercase=0.1 , __lowercase=True , __lowercase=[3, 5, 7, 1_1] , __lowercase=[1, 2, 3, 6] , __lowercase=True , __lowercase=0.4 , __lowercase=2_5_6 , __lowercase=1 , __lowercase=False , __lowercase=2_5_5 , **__lowercase , ) -> Optional[Any]: super().__init__(**__lowercase ) lowerCAmelCase_ : List[str] = vocab_size lowerCAmelCase_ : Any = hidden_size lowerCAmelCase_ : Any = num_hidden_layers lowerCAmelCase_ : Optional[Any] = num_attention_heads lowerCAmelCase_ : List[str] = intermediate_size lowerCAmelCase_ : int = hidden_act lowerCAmelCase_ : Dict = hidden_dropout_prob lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase_ : Tuple = initializer_range lowerCAmelCase_ : str = layer_norm_eps lowerCAmelCase_ : Union[str, Any] = image_size lowerCAmelCase_ : Union[str, Any] = patch_size lowerCAmelCase_ : Dict = num_channels lowerCAmelCase_ : Dict = use_mask_token lowerCAmelCase_ : Union[str, Any] = use_absolute_position_embeddings lowerCAmelCase_ : List[Any] = use_relative_position_bias lowerCAmelCase_ : 
Optional[Any] = use_shared_relative_position_bias lowerCAmelCase_ : Dict = layer_scale_init_value lowerCAmelCase_ : Optional[Any] = drop_path_rate lowerCAmelCase_ : Optional[Any] = use_mean_pooling # decode head attributes (semantic segmentation) lowerCAmelCase_ : Union[str, Any] = out_indices lowerCAmelCase_ : Any = pool_scales # auxiliary head attributes (semantic segmentation) lowerCAmelCase_ : Tuple = use_auxiliary_head lowerCAmelCase_ : Union[str, Any] = auxiliary_loss_weight lowerCAmelCase_ : str = auxiliary_channels lowerCAmelCase_ : str = auxiliary_num_convs lowerCAmelCase_ : Optional[int] = auxiliary_concat_input lowerCAmelCase_ : Union[str, Any] = semantic_loss_ignore_index class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = version.parse("""1.11""" ) @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase_ ( self ) -> float: return 1e-4
706
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Optional[int] =logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] ={ """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""", } class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese""" def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) lowerCAmelCase_ : Optional[Any] = vocab_size lowerCAmelCase_ : Tuple = max_position_embeddings lowerCAmelCase_ : Optional[Any] = hidden_size lowerCAmelCase_ : Optional[Any] = num_hidden_layers lowerCAmelCase_ : str = num_attention_heads lowerCAmelCase_ : str = intermediate_multiple_size lowerCAmelCase_ : str = hidden_act lowerCAmelCase_ : Dict = rotary_pct lowerCAmelCase_ : Union[str, Any] = rotary_emb_base lowerCAmelCase_ : int = initializer_range lowerCAmelCase_ : Any = layer_norm_eps lowerCAmelCase_ : Optional[Any] = use_cache lowerCAmelCase_ : Tuple = attention_dropout lowerCAmelCase_ : Dict = hidden_dropout
619
0
def lowerCAmelCase ( lowerCAmelCase_ )-> set: lowerCAmelCase_ : Union[str, Any] = set() # edges = list of graph's edges lowerCAmelCase_ : Union[str, Any] = get_edges(lowercase_ ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: lowerCAmelCase_ : str = edges.pop() chosen_vertices.add(lowercase_ ) chosen_vertices.add(lowercase_ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(lowercase_ ) return chosen_vertices def lowerCAmelCase ( lowerCAmelCase_ )-> set: lowerCAmelCase_ : List[str] = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
707
# NOTE(review): this MaskFormer test module was mangled by an automated
# rename: every parameter is `__lowercase` (duplicate parameter names are
# invalid Python) and every local binds to the single name `lowerCAmelCase_`,
# so most references below (e.g. `parent`, `pixel_values`, `model`) are
# unresolved. Code tokens are preserved unchanged; only comments and
# docstrings were added. Restoring the names requires the upstream file.
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

    if is_vision_available():
        from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class snake_case__:
    """Model tester: builds small MaskFormer configs and input fixtures for the common tests."""

    # NOTE(review): duplicate `__lowercase` parameters below — signature is invalid as written.
    def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
        # Presumably (parent, batch_size, is_training, use_auxiliary_loss, num_queries,
        # num_channels, min_size, max_size, num_labels, mask_feature_size) — TODO confirm.
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : List[Any] = is_training
        lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
        lowerCAmelCase_ : List[Any] = num_queries
        lowerCAmelCase_ : str = num_channels
        lowerCAmelCase_ : Dict = min_size
        lowerCAmelCase_ : List[str] = max_size
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : str = mask_feature_size

    # Presumably prepare_config_and_inputs: random pixel values, mask, mask/class labels.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        lowerCAmelCase_ : Dict = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    # Presumably get_config: tiny Swin backbone + DETR decoder.
    def lowercase_ ( self ) -> List[str]:
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) ,
            decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 ,
                num_queries=self.num_queries ,
                decoder_attention_heads=2 ,
                d_model=self.mask_feature_size , ) ,
            mask_feature_size=self.mask_feature_size ,
            fpn_feature_size=self.mask_feature_size ,
            num_channels=self.num_channels ,
            num_labels=self.num_labels , )

    # Presumably prepare_config_and_inputs_for_common.
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    # Presumably check_output_hidden_state: hidden-state counts match config depths.
    def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
        lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )

    # Presumably create_and_check_maskformer_model.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,
            (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )

    # Presumably create_and_check_maskformer_instance_segmentation_head_model.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()

        def comm_check_on_output(__lowercase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Any = model(__lowercase )
            comm_check_on_output(__lowercase )
            lowerCAmelCase_ : List[Any] = model(
                pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common-suite tests for MaskFormer (bases presumably ModelTesterMixin, PipelineTesterMixin)."""

    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False

    # Presumably setUp.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )

    def lowercase_ ( self ) -> Any:
        self.config_tester.run_common_tests()

    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_ ( self ) -> str:
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> Dict:
        pass

    # Forward-signature check: first positional argument must be pixel_values.
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )

    @slow
    def lowercase_ ( self ) -> Optional[int]:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    # Loss computation smoke test with random targets.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )

    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    # Attention-outputs smoke test.
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )

    # Training smoke test (loss backward).
    def lowercase_ ( self ) -> List[str]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()

    # Gradient-retention test on intermediate hidden states and attentions.
    def lowercase_ ( self ) -> Optional[int]:
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


# Absolute tolerance used (presumably) for the integration allclose checks.
_UpperCAmelCase : Dict = 1E-4


# Presumably prepare_img: loads the standard COCO test fixture image.
def lowerCAmelCase ( )-> Any:
    lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_vision
@slow
class snake_case__( unittest.TestCase ):
    """Integration tests against released MaskFormer checkpoints."""

    @cached_property
    def lowercase_ ( self ) -> Union[str, Any]:
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )

    # Base model: check hidden-state slices against reference values.
    def lowercase_ ( self ) -> Any:
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )

        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )

        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )

        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )

        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )

    # Instance-segmentation head, Swin-small checkpoint.
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval() )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )

        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )

        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )

        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    # Instance-segmentation head, ResNet-101 COCO-stuff checkpoint.
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval() )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )

        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )

        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )

        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    # Loss computation with real segmentation maps through the image processor.
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval() )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] ,
            segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] ,
            return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]

        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )

        self.assertTrue(outputs.loss is not None )
619
0
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive whole-word occurrences of `term` in `document`.

    Punctuation and newlines are stripped before whitespace tokenization.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing `term`, total documents).

    Documents are newline-separated inside `corpus`; matching is
    case-insensitive substring containment after stripping punctuation.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    # NOTE(review): restored `len(docs)` — the mangled version referenced an
    # undefined name here; also `from math import logaa` was fixed to log10.
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df) rounded to 3 places.

    With `smoothing`, returns 1 + log10(n / (1 + df)) so df == 0 is allowed.

    Raises:
        ZeroDivisionError: if df == 0 without smoothing.
        ValueError: if n == 0 (log10(0) is undefined).
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Return the tf-idf score, rounded to 3 places."""
    return round(tf * idf, 3)
708
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class snake_case__(TaskTemplate):
    """Task template for automatic speech recognition datasets.

    NOTE(review): field/method names were mangled (all fields shared one
    placeholder name); they are restored from the `self.audio_column` /
    `self.input_schema` / `self.transcription_column` references in the body.
    """

    # Serialized even when left at its default value.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's own Audio feature.

        Raises:
            ValueError: if `audio_column` is absent from `features` or is not an Audio feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Dataclass is frozen, so write through __dict__ on the copy.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names onto the canonical task column names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy-import structure: submodule name -> list of public symbols. Populated
# only when the optional sentencepiece dependency is installed.
# NOTE(review): restored the `_import_structure` name — the mangled version
# bound both the dict and the symbol list to `_UpperCAmelCase`, leaving the
# `_import_structure` reference below undefined.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    # Static type checkers see the concrete imports.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
709
# Accepted-argument sets for diffusers pipeline common tests.
# NOTE(review): every constant below was machine-renamed to the SAME name
# `_UpperCAmelCase`, so each assignment clobbers the previous one and only the
# final frozenset survives at runtime. Tokens are preserved unchanged; the
# original distinct names (e.g. TEXT_TO_IMAGE_PARAMS, ...) must be recovered
# from the upstream file before these can be used.

# text-to-image: full call parameters, then the batched subset.
_UpperCAmelCase : int = frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] = frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict = frozenset([])
_UpperCAmelCase : int = frozenset(["""image"""])
# image variation (no prompt).
_UpperCAmelCase : Tuple = frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int = frozenset(["""image"""])
# text-guided image-to-image.
_UpperCAmelCase : str = frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int = frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] = frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] = frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] = frozenset(["""image""", """mask_image"""])
# example-guided inpainting.
_UpperCAmelCase : Union[str, Any] = frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple = frozenset(["""example_image""", """image""", """mask_image"""])
# class-conditioned / unconditional generation.
_UpperCAmelCase : Any = frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] = frozenset(["""class_labels"""])
_UpperCAmelCase : int = frozenset(["""batch_size"""])
_UpperCAmelCase : str = frozenset([])
_UpperCAmelCase : str = frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] = frozenset([])
# text-to-audio.
_UpperCAmelCase : Tuple = frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple = frozenset(["""prompt""", """negative_prompt"""])
# token-conditioned generation.
_UpperCAmelCase : List[str] = frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] = frozenset(["""input_tokens"""])
619
0
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an integer postfix (RPN) expression.

    Supports +, -, * and division that truncates toward zero. Returns 0 for
    an empty expression. Non-operator tokens are parsed with int().

    NOTE(review): restored from mangled code in which the two-value stack pop
    had been collapsed onto a single name, leaving `a` and `b` undefined.
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            # b is the top of the stack (right operand), a the one below it.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Python's // floors; adjust so division truncates toward zero.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
710
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 14: starting number below `limit` with the longest Collatz chain.

    Chain lengths are memoized in `counters` (chain length of 1 is 1), so each
    walk stops as soon as it reaches a previously-seen number.

    NOTE(review): restored from mangled code where `largest_number`,
    `pre_counter` and `counters` had all collapsed onto one local name; the
    function name `solution` is fixed by the call in the __main__ guard.
    """
    largest_number = 1
    pre_counter = 1  # longest chain length seen so far
    counters = {1: 1}

    for start in range(2, limit):
        counter = 0
        number = start

        while True:
            if number in counters:
                # Reuse the cached tail length instead of walking to 1.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
619
0
# NOTE(review): this MobileNetV1 test module was mangled by an automated
# rename: parameters are duplicated as `__lowercase` (invalid Python), locals
# all bind to `lowerCAmelCase_`, and several references (`parent`,
# `batch_size`, `model`, `UpperCamelCase__`, ...) are unresolved. Code tokens
# are preserved unchanged; only comments and docstrings were added.
import inspect
import unittest

from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class snake_case__( SCREAMING_SNAKE_CASE_ ):
    """Config tester: asserts MobileNetV1-specific attributes exist on the config."""

    def lowercase_ ( self ) -> str:
        lowerCAmelCase_ : Union[str, Any] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , '''tf_padding''' ) )
        self.parent.assertTrue(hasattr(UpperCamelCase__ , '''depth_multiplier''' ) )


class snake_case__:
    """Model tester: builds small MobileNetV1 configs and input fixtures."""

    # NOTE(review): duplicate `__lowercase` parameters — signature is invalid as written.
    def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=3 , __lowercase=3_2 , __lowercase=0.25 , __lowercase=8 , __lowercase=True , __lowercase=1_0_2_4 , __lowercase=3_2 , __lowercase="relu6" , __lowercase=0.1 , __lowercase=0.02 , __lowercase=True , __lowercase=True , __lowercase=1_0 , __lowercase=None , ) -> Any:
        # Presumably (parent, batch_size, num_channels, image_size, depth_multiplier,
        # min_depth, tf_padding, last_hidden_size, output_stride, hidden_act,
        # classifier_dropout_prob, initializer_range, use_labels, is_training,
        # num_labels, scope) — TODO confirm against the upstream file.
        lowerCAmelCase_ : Any = parent
        lowerCAmelCase_ : Dict = batch_size
        lowerCAmelCase_ : Union[str, Any] = num_channels
        lowerCAmelCase_ : Any = image_size
        lowerCAmelCase_ : Optional[int] = depth_multiplier
        lowerCAmelCase_ : List[str] = min_depth
        lowerCAmelCase_ : Union[str, Any] = tf_padding
        lowerCAmelCase_ : Dict = int(last_hidden_size * depth_multiplier )
        lowerCAmelCase_ : int = output_stride
        lowerCAmelCase_ : List[Any] = hidden_act
        lowerCAmelCase_ : Dict = classifier_dropout_prob
        lowerCAmelCase_ : List[Any] = use_labels
        lowerCAmelCase_ : int = is_training
        lowerCAmelCase_ : Tuple = num_labels
        lowerCAmelCase_ : Tuple = initializer_range
        lowerCAmelCase_ : int = scope

    # Presumably prepare_config_and_inputs.
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase_ : str = None
        lowerCAmelCase_ : Union[str, Any] = None
        if self.use_labels:
            lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
            lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCAmelCase_ : str = self.get_config()
        return config, pixel_values, labels, pixel_labels

    # Presumably get_config.
    def lowercase_ ( self ) -> Optional[Any]:
        return MobileNetVaConfig(
            num_channels=self.num_channels ,
            image_size=self.image_size ,
            depth_multiplier=self.depth_multiplier ,
            min_depth=self.min_depth ,
            tf_padding=self.tf_padding ,
            hidden_act=self.hidden_act ,
            classifier_dropout_prob=self.classifier_dropout_prob ,
            initializer_range=self.initializer_range , )

    # Presumably create_and_check_model: last hidden state has the expected spatial shape.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
        lowerCAmelCase_ : Dict = MobileNetVaModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCAmelCase_ : int = model(UpperCamelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    # Presumably create_and_check_for_image_classification.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
        lowerCAmelCase_ : str = self.num_labels
        lowerCAmelCase_ : int = MobileNetVaForImageClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        lowerCAmelCase_ : Union[str, Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Presumably prepare_config_and_inputs_for_common.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Any = config_and_inputs
        lowerCAmelCase_ : Tuple = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class snake_case__( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
    """Common-suite tests for MobileNetV1 (bases presumably ModelTesterMixin, PipelineTesterMixin)."""

    SCREAMING_SNAKE_CASE__ : List[str] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : List[str] = (
        {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : str = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Optional[Any] = False
    SCREAMING_SNAKE_CASE__ : List[str] = False

    # Presumably setUp.
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : List[Any] = MobileNetVaModelTester(self )
        lowerCAmelCase_ : Dict = MobileNetVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )

    def lowercase_ ( self ) -> Tuple:
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
    def lowercase_ ( self ) -> Optional[int]:
        pass

    @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @unittest.skip(reason='''MobileNetV1 does not output attentions''' )
    def lowercase_ ( self ) -> Tuple:
        pass

    # Forward-signature check: first positional argument must be pixel_values.
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Optional[Any] = model_class(UpperCamelCase__ )
            lowerCAmelCase_ : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : Any = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    # Hidden-states output: MobileNetV1 exposes 26 intermediate feature maps.
    def lowercase_ ( self ) -> Any:
        def check_hidden_states_output(__lowercase , __lowercase , __lowercase ):
            lowerCAmelCase_ : List[str] = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase_ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            lowerCAmelCase_ : Union[str, Any] = outputs.hidden_states
            lowerCAmelCase_ : Optional[int] = 2_6
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )

        lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase_ : Union[str, Any] = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def lowercase_ ( self ) -> Tuple:
        lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )

    @slow
    def lowercase_ ( self ) -> Any:
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : Optional[int] = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


# Presumably prepare_img: loads the standard COCO test fixture image.
def lowerCAmelCase ( )-> Tuple:
    lowerCAmelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class snake_case__( unittest.TestCase ):
    """Integration test against the released google/mobilenet_v1_1.0_224 checkpoint."""

    @cached_property
    def lowercase_ ( self ) -> List[Any]:
        return (
            MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' )
            if is_vision_available()
            else None
        )

    @slow
    def lowercase_ ( self ) -> str:
        lowerCAmelCase_ : Optional[int] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(UpperCamelCase__ )
        lowerCAmelCase_ : Optional[int] = self.default_image_processor
        lowerCAmelCase_ : Optional[int] = prepare_img()
        lowerCAmelCase_ : int = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**UpperCamelCase__ )
        # verify the logits
        lowerCAmelCase_ : Union[str, Any] = torch.Size((1, 1_0_0_1) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowerCAmelCase_ : List[str] = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
711
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration for a generic composite encoder-decoder model.

    Wraps two arbitrary sub-model configs (``encoder`` and ``decoder``) that
    are re-instantiated through the auto-config mapping.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both sub-configs are mandatory and arrive as plain dicts.
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate from two existing configs, forcing decoder-side flags."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
619
0
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for CPM-Ant (pre-tokenization is done with jieba)."""

    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Minimal vocabulary covering the special tokens plus the characters
        # exercised by the tests below.
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        # Hits the hub; marked @tooslow so it only runs in the slow suite.
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
712
from __future__ import annotations from math import pi def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]: if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
619
0
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Traverse unvisited edges from `u` depth-first, returning the walk.

    `visited_edge` is a symmetric adjacency-matrix of booleans; each edge is
    marked in both directions so it is consumed exactly once.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph by its odd-degree vertex count.

    Returns (1, odd_node) for an Euler circuit (zero odd vertices),
    (2, odd_node) for an Euler path (exactly two), (3, odd_node) otherwise.
    `odd_node` is the last odd-degree vertex seen, or -1 if none.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Report whether `graph` has an Euler path/cycle and print one if so."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree vertices.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """Run the checker on a handful of example graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
713
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names of the supported learning-rate schedules."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Constant LR: the multiplier is 1.0 at every step."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Linear warmup from 0 to the base LR, then constant."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant LR from a rule string like "1:10,0.1:20,0.01".

    Each "step:multiplier" pair applies the multiplier until that step; the
    trailing bare value is the multiplier used after the last boundary.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup, then cosine decay over `num_cycles` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer's initial LR to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build any schedule in `SchedulerType` by name.

    Raises ValueError when a schedule needs `num_warmup_steps` or
    `num_training_steps` and they were not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
619
0
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Lint the repository's file names: no uppercase, spaces, hyphens, and every
# file must live inside a directory. Exits non-zero with the offender count.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
714
from __future__ import annotations


def lowerCAmelCase(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve the mass-action law n_i^2 = n * p for the zero-valued argument.

    Exactly one of the three concentrations must be 0; it is computed from
    the other two and returned as a ("name", value) tuple.

    Raises:
        ValueError: if the number of zero arguments is not exactly one, or
            if any concentration is negative.

    >>> lowerCAmelCase(25, 100, 0)
    ('intrinsic_conc', 50.0)
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: the count(0) guard above guarantees one branch matched.
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    # Load the series and min-max scale it to [0, 1] for the LSTM.
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10  # window of past observations fed to the model
    forward_days = 5  # horizon predicted per window
    periods = 20  # number of look_back-sized windows held out for testing

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    # Overlap by look_back so the first test window has full history.
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Slide a (look_back -> forward_days) window over each split.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
715
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first docstring checkpoint whose link matches its name, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Fail if any non-ignored config class docstring lacks a valid checkpoint."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
0
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder

# Credentials/endpoints for the Hub CI instance (never production).
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/(unknown)"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    # Redirect huggingface_hub downloads to the CI endpoint.
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def set_ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(set_ci_hub_token_path, ci_hub_config):
    # NOTE(review): fixture parameter names reconstructed from context — confirm against callers.
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    # Install the CI token for the whole session, restoring any prior token.
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
716
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """
    Wraps a learning-rate scheduler so it only advances when its optimizer(s)
    actually stepped — gradient-accumulation steps and steps skipped by the
    mixed-precision grad scaler leave the schedule untouched.

    Args:
        scheduler: the wrapped `torch.optim.lr_scheduler` instance.
        optimizers: one optimizer, or a list/tuple of them, tied to `scheduler`.
        step_with_optimizer: when True (default), only step the schedule
            together with the optimizer; when False, `step()` always forwards.
        split_batches: when True the dataloader batch size was not multiplied
            by the process count, so one scheduler step per training step.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # The methods below are straight pass-throughs to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
619
0
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig _UpperCAmelCase : Any ={ """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""", """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""", } class snake_case__( UpperCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = """ernie_m""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""dropout""": """classifier_dropout""", """num_classes""": """num_labels"""} def __init__( self , __lowercase = 2_5_0_0_0_2 , __lowercase = 7_6_8 , __lowercase = 1_2 , __lowercase = 1_2 , __lowercase = 3_0_7_2 , __lowercase = "gelu" , __lowercase = 0.1 , __lowercase = 0.1 , __lowercase = 5_1_4 , __lowercase = 0.02 , __lowercase = 1 , __lowercase = 1e-05 , __lowercase=None , __lowercase=False , __lowercase=0.0 , **__lowercase , ) -> List[str]: super().__init__(pad_token_id=__a , **__a ) lowerCAmelCase_ : Optional[Any] = vocab_size lowerCAmelCase_ : List[Any] = hidden_size lowerCAmelCase_ : Any = num_hidden_layers lowerCAmelCase_ : List[str] = num_attention_heads lowerCAmelCase_ : Union[str, Any] = intermediate_size lowerCAmelCase_ : str = hidden_act lowerCAmelCase_ : str = hidden_dropout_prob lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase_ : Optional[Any] = max_position_embeddings lowerCAmelCase_ : Tuple = initializer_range lowerCAmelCase_ : Tuple = layer_norm_eps lowerCAmelCase_ : List[Any] = classifier_dropout lowerCAmelCase_ : Union[str, Any] = is_decoder lowerCAmelCase_ : Optional[Any] = act_dropout
717
# NOTE(review): this block is a manim `Scene` (originally an animation of how model
# weights are loaded from a checkpoint onto CPU/GPU/disk, judging by the "CPU",
# "GPU", "Model", "Loaded Checkpoint" and "Disk" labels). It has been obfuscated:
# every local is assigned to `lowerCAmelCase_` while later statements read the real
# names (`cpu`, `gpu`, `model`, `checkpoint`, `disk`, `target`, `ckpt_arr`, ...),
# and every argument was replaced with the undefined name `__lowercase`, including
# what were presumably manim direction constants (RIGHT/DOWN/etc.) — TODO confirm
# against the original animation script before attempting to run this.
# The whole file has also been collapsed onto single physical lines, so it does not
# parse as-is. Restoring it requires the original source; the code is left
# byte-identical below rather than guessed at.
from manim import * class snake_case__( UpperCAmelCase__ ): '''simple docstring''' def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 ) lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )] lowerCAmelCase_ : int = [mem.copy() for i in range(6 )] lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 ) lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowercase ) lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )] lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 ) lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) gpu.move_to([-1, -1, 0] ) self.add(__lowercase ) lowerCAmelCase_ : str = [mem.copy() for i in range(6 )] lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 ) lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) model.move_to([3, -1.0, 0] ) self.add(__lowercase ) lowerCAmelCase_ : int = [] lowerCAmelCase_ : int = [] lowerCAmelCase_ : Dict = [] for i, rect in enumerate(__lowercase ): rect.set_stroke(__lowercase ) lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 ) if i == 0: 
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 ) self.add(__lowercase ) model_cpu_arr.append(__lowercase ) self.add(*__lowercase , *__lowercase , *__lowercase ) lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )] lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 ) lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__lowercase ) lowerCAmelCase_ : Optional[Any] = [] lowerCAmelCase_ : Dict = [] for i, rect in enumerate(__lowercase ): lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 ) target.move_to(__lowercase ) ckpt_arr.append(__lowercase ) lowerCAmelCase_ : Union[str, Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__lowercase ) self.add(*__lowercase , *__lowercase ) lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase_ : str = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowercase , __lowercase ) lowerCAmelCase_ : str = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , ) blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__lowercase ) lowerCAmelCase_ : str = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , ) step_a.move_to([2, 2, 
0] ) lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )] lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )] lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 ) lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 ) lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) ) lowerCAmelCase_ : int = [] for i, rect in enumerate(__lowercase ): lowerCAmelCase_ : int = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__lowercase , run_time=1.5 ) ) self.play(*__lowercase ) self.play(FadeOut(__lowercase ) ) lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowercase , run_time=3 ) ) self.play( FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , ) self.wait()
619
0
import math

import tensorflow as tf
from packaging import version

# NOTE(review): in the original block every function was named `lowerCAmelCase`
# (each definition shadowing the previous) and the ACT2FN registry referenced
# names that were never bound. Names are restored from the registry keys and the
# math each body implements.


def _gelu(x):
    """Exact GELU: x * Phi(x) computed with the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the "new" GPT-2 style variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Fast GELU approximation (0.7978845608 ~= sqrt(2/pi) pre-folded)."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to [-10, 10] for quantization-friendly ranges."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two halves along `axis`, gate with sigmoid."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        # Native Keras implementation of the tanh-approximated GELU.
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up an activation function by name; raises KeyError with the valid names."""
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
718
# Value/symbol pairs ordered from largest to smallest; greedy conversion relies
# on this ordering. (Restored name: the original bound this list to a mangled
# global while `int_to_roman` iterated an undefined `ROMAN`.)
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to its integer value.

    Handles subtractive notation by comparing each symbol with its successor.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # Subtractive pair (e.g. IV, XC): smaller value precedes a larger one.
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to a Roman numeral string (greedy)."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """
    Batched pinhole camera whose rays can be computed differentiably.

    NOTE(review): the original block declared every field as a duplicated
    mangled name with value 42; field names are restored from the `self.*`
    reads in the methods (`self.origin`, `self.x`, ..., `self.x_fov`).
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3] camera-frame x axis
    y: torch.Tensor  # [batch_size x 3] camera-frame y axis
    z: torch.Tensor  # [batch_size x 3] viewing direction
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        # All per-camera tensors must be 2-D [batch, 3] with matching batch size.
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Return [H*W, 2] integer (col, row) pixel coordinates."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """Rays for every pixel of every camera: [batch, inner*H*W, 2, 3] (origin, direction)."""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        """Map pixel coords [batch, ..., 2] to (origin, unit direction) rays [batch, ..., 2, 3]."""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Normalize pixel coords to [-1, 1], then scale by tan(fov/2) to get image-plane offsets.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Return a camera for a resized view; the aspect ratio must be preserved."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            # fix: `shape` is a required field but the original omitted it,
            # making this constructor call raise TypeError.
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Build 20 cameras orbiting the origin, looking slightly downward, at resolution `size`."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
719
import csv

import tweepy

# Twitter API credentials (fill in before running).
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent timeline (API cap ~3200 tweets) to new_<name>_tweets.csv.

    NOTE(review): local names restored — the original assigned every value to a
    mangled local while later statements read `auth`, `oldest`, etc., and the
    `__main__` guard called `get_all_tweets`, which was never defined.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
619
0
# Type aliases for 3-D vectors/points (requires Python 3.9+ for builtin generics).
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Return the vector from `end_point1` to `end_point2`."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Return the cross product ab x ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """True if every component rounds to 0 at `accuracy` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """True if points a, b, c lie on one line (cross product of AB and AC is ~zero)."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
720
from math import sqrt

# NOTE(review): every function in the original block shared one mangled name and
# read parameters (`number`, `n`, ...) that were never bound; names below are
# restored from those reads and from the cross-calls (`is_prime`, `gcd`, ...).
# Assertion messages are kept byte-for-byte, typos included, since callers may
# match on them.


def is_prime(number: int) -> bool:
    """Trial-division primality test for non-negative ints."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n: int):
    """Sieve of Eratosthenes: primes from 2 up to n (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n: int):
    """Primes from 2 up to n, found by per-number primality testing."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number: int):
    """Prime factors of `number` in ascending order (0 and 1 map to themselves)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number: int):
    """Largest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int):
    """Smallest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int):
    """Return two primes whose sum is the even `number` > 2 (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1: int, number2: int):
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1: int, number2: int):
    """Least common multiple, built from the prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int):
    """Return the n-th prime (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int):
    """All primes strictly between the two given primes."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int):
    """All positive divisors of n, ascending (includes 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """True if `number` equals the sum of its proper divisors (e.g. 6, 28)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int):
    """Reduce numerator/denominator by their gcd; returns the reduced pair."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int):
    """Iterative factorial of n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int):
    """n-th Fibonacci number with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
619
0
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase

# NOTE(review): the original `padding_tensor` had four parameters all named
# `lowerCAmelCase_` (a SyntaxError) and the write targets of each branch were
# lost in mangling; names/index targets restored from the surrounding reads.


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a list of variable-length sequences to `sequence_length`.

    `padding_value` may be a scalar or a 2-tuple (then each element of a
    sequence is itself a pair, e.g. entity spans). `padding_side` is "right"
    or "left", mirroring the tokenizer's padding side.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    """True for ASCII punctuation ranges or any Unicode 'P*' category character."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that pads inputs and entity-level labels for LUKE token
    classification. Labels are padded to the entity sequence length with
    `label_pad_token_id`; `ner_tags` and `original_entity_spans` are padded
    with -1 / (-1, -1) sentinels.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    # DataCollatorMixin dispatches to `torch_call` for return_tensors == "pt";
    # the mangled name `lowercase_` broke that hook.
    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors will fail with ragged labels, so delay it
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
721
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of array[left:right]; returns the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted `array`; returns index of target or -1.

    Falls back to `lin_search` once the window is smaller than `precision`.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # target lies strictly between the two probe points
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over sorted array[left..right]; returns index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        if array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        # fix: the original printed the iterative result twice
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
619
0
"""Lazy import structure for the PoolFormer model (configuration, image
processing and torch modeling).  Heavy submodules are only imported when an
attribute is first accessed; type checkers see the eager imports instead."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps submodule name -> public names it exports. Optional-backend entries
# are appended below only when the corresponding dependency is installed.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # vision backend missing: skip feature-extractor / image-processor exports
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: skip the modeling exports
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analysis path: real imports, guarded the same way as above.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module so submodules load on
    # demand (the original dropped the proxy into a throwaway variable,
    # leaving `import sys` unused and lazy loading inert). Passing
    # module_spec keeps importlib metadata intact, matching the sibling
    # llama __init__ in this repository.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
700
"""Lazy import structure for the LLaMA model: configuration, slow/fast
tokenizers and torch modeling are imported only on first attribute access;
type checkers see the eager imports instead."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> public names it exports. Optional-backend entries
# are appended below only when the corresponding dependency is installed.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # sentencepiece missing: no slow tokenizer
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # tokenizers missing: no fast tokenizer
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: no modeling classes
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static analysis path: real imports, guarded the same way as above.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Install the lazy proxy in place of this module so submodules load on
    # demand (the original dropped the proxy into a throwaway variable,
    # leaving lazy loading inert).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
0
"""Project Euler problem 129: repunit divisibility.

A(n) is the least k such that the repunit R(k) = (10**k - 1) / 9, i.e. the
number written as k consecutive 1s, is divisible by n.  A(n) exists only
for n coprime to 10, and A(n) <= n.  We search for the least n with
A(n) > 1_000_000.
"""


def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the least k with R(k) divisible by ``divisor``.

    Returns 0 when ``divisor`` shares a factor with 10, since no repunit is
    then divisible by it.

    >>> least_divisible_repunit(7)
    6
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    # Track R(k) modulo divisor only, via the recurrence R(k+1) = 10*R(k) + 1;
    # the loop ends on the first k where the residue hits 0.
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least n for which A(n) first exceeds ``limit``.

    Because A(n) <= n, candidates below ``limit`` cannot qualify, so the
    scan starts just under ``limit`` and steps through odd numbers only
    (even candidates have A(n) = 0 anyway).

    >>> solution(10)
    17
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
701
# Unit tests for VQDiffusionPipeline: a fast CPU suite built from tiny dummy
# components, plus a slow GPU integration test against a released checkpoint.
# NOTE(review): identifiers in this file are mangled — every method is named
# `lowercase_` (so later defs shadow earlier ones), locals are assigned to
# `lowerCAmelCase_` but read back under their original names (`model`,
# `tokenizer`, `pipe`, ...), call sites pass the undefined `__lowercase`, and
# the `typing` names used in return annotations are never imported.  The file
# cannot run as written; the original names need restoring.
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

# Module-level flag (unused in the visible code).
_UpperCAmelCase : Any = False


class snake_case__( unittest.TestCase ):
    '''simple docstring'''

    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        # presumably the codebook size (num_embed) of the dummy VQ model — TODO confirm
        return 1_2

    @property
    def lowercase_ ( self ) -> Any:
        # presumably num_embeds_ada_norm for the dummy transformer — TODO confirm
        return 1_2

    @property
    def lowercase_ ( self ) -> Optional[Any]:
        # presumably text_embedder_hidden_size used by the dummy CLIP config — TODO confirm
        return 3_2

    @property
    def lowercase_ ( self ) -> int:
        # Tiny VQModel, seeded so its random init is reproducible across runs.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Any = VQModel(
            block_out_channels=[3_2, 6_4] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,
            latent_channels=3 ,
            num_vq_embeddings=self.num_embed ,
            vq_embed_dim=3 ,
        )
        return model

    @property
    def lowercase_ ( self ) -> Dict:
        # Tiny CLIP tokenizer pulled from the hf-internal-testing hub space.
        lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def lowercase_ ( self ) -> int:
        # Tiny CLIP text encoder, seeded for reproducible weights.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=self.text_embedder_hidden_size ,
            intermediate_size=3_7 ,
            layer_norm_eps=1e-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_0_0_0 ,
        )
        return CLIPTextModel(__lowercase )

    @property
    def lowercase_ ( self ) -> List[str]:
        # Tiny discrete transformer over a 12x12 latent grid (height * width
        # becomes the per-head attention dimension with a single head).
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = 1_2
        lowerCAmelCase_ : int = 1_2
        lowerCAmelCase_ : Union[str, Any] = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 3_2,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 3_2,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
        return model

    def lowercase_ ( self ) -> str:
        # End-to-end CPU run WITHOUT learned classifier-free sampling
        # embeddings: checks output shape, a fixed expected slice, and that
        # the tuple-return path matches the dict-return path.
        lowerCAmelCase_ : List[Any] = '''cpu'''
        lowerCAmelCase_ : Any = self.dummy_vqvae
        lowerCAmelCase_ : str = self.dummy_text_encoder
        lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
        lowerCAmelCase_ : int = self.dummy_transformer
        lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
        lowerCAmelCase_ : Dict = VQDiffusionPipeline(
            vqvae=__lowercase ,
            text_encoder=__lowercase ,
            tokenizer=__lowercase ,
            transformer=__lowercase ,
            scheduler=__lowercase ,
            learned_classifier_free_sampling_embeddings=__lowercase ,
        )
        lowerCAmelCase_ : int = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        # Same seed for both invocations so their outputs are comparable.
        lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : Union[str, Any] = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : List[Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self ) -> List[str]:
        # Same pipeline run but WITH learnable classifier-free sampling
        # embeddings sized to the tokenizer's max length.
        lowerCAmelCase_ : Optional[Any] = '''cpu'''
        lowerCAmelCase_ : str = self.dummy_vqvae
        lowerCAmelCase_ : Dict = self.dummy_text_encoder
        lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
        lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
        lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
            vqvae=__lowercase ,
            text_encoder=__lowercase ,
            tokenizer=__lowercase ,
            transformer=__lowercase ,
            scheduler=__lowercase ,
            learned_classifier_free_sampling_embeddings=__lowercase ,
        )
        lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : str = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
        # NOTE(review): the 2.0 tolerance below is effectively a no-op check
        # for [0, 1]-ranged images, unlike the 1e-2 used everywhere else.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    '''simple docstring'''

    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self ) -> int:
        # Integration test against the released microsoft/vq-diffusion-ithq
        # checkpoint, compared with a stored reference image from the hub.
        lowerCAmelCase_ : Tuple = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Optional[int] = pipeline(
            '''teddy bear playing in the pool''' ,
            num_images_per_prompt=1 ,
            generator=__lowercase ,
            output_type='''np''' ,
        )
        lowerCAmelCase_ : Union[str, Any] = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image ).max() < 2.0
619
0
# Fine-tunes OpenAI GPT (double-heads model) on the RocStories cloze task
# and/or evaluates it: each example is a 4-sentence story plus two candidate
# endings, scored jointly with an LM head and a multiple-choice head.
# NOTE(review): identifier names in this file are mangled — all four defs are
# named `lowerCAmelCase` (shadowing each other), bodies reference
# `SCREAMING_SNAKE_CASE_` and original local names (`args`, `parser`,
# `tokenizer`, ...) that are never bound, and `main` / `load_rocstories_dataset`
# / `pre_process_datasets` / `accuracy` are called but not defined under those
# names.  The module-level `Optional[int]` annotation is also evaluated at
# import time without a `typing` import.  The file cannot run as written;
# the original names need restoring.
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)

logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
_UpperCAmelCase : Optional[int] = logging.getLogger(__name__)


def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
    # accuracy(out, labels): number of argmax predictions matching labels.
    lowerCAmelCase_ : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
    return np.sum(outputs == labels )


def lowerCAmelCase ( lowerCAmelCase_ ):
    # load_rocstories_dataset(path): parse the RocStories CSV into
    # (story, continuation_1, continuation_2, 0-based label) tuples,
    # skipping the header row.
    with open(SCREAMING_SNAKE_CASE_ , encoding='''utf_8''' ) as f:
        lowerCAmelCase_ : int = csv.reader(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : List[Any] = []
        next(SCREAMING_SNAKE_CASE_ )  # skip the first line
        for line in tqdm(SCREAMING_SNAKE_CASE_ ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output


def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    # pre_process_datasets(encoded_datasets, input_len, cap_length,
    # start_token, delimiter_token, clf_token): build per-dataset tensors
    # (input_ids, mc_token_ids, lm_labels, mc_labels), with both candidate
    # continuations laid out as [start] story [delim] cont [clf] and
    # lm_labels padded with -100 (ignored by the LM loss).
    lowerCAmelCase_ : List[str] = []
    for dataset in encoded_datasets:
        lowerCAmelCase_ : int = len(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
        lowerCAmelCase_ : Union[str, Any] = np.zeros((n_batch, 2) , dtype=np.intaa )
        lowerCAmelCase_ : Dict = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
        lowerCAmelCase_ : int = np.zeros((n_batch,) , dtype=np.intaa )
        for (
            i,
            (story, conta, conta, mc_label),
        ) in enumerate(SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase_ : List[str] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            lowerCAmelCase_ : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            lowerCAmelCase_ : str = with_conta
            lowerCAmelCase_ : int = with_conta
            # mc_token_ids point at the [clf] position of each candidate.
            lowerCAmelCase_ : Tuple = len(SCREAMING_SNAKE_CASE_ ) - 1
            lowerCAmelCase_ : List[str] = len(SCREAMING_SNAKE_CASE_ ) - 1
            lowerCAmelCase_ : List[str] = with_conta
            lowerCAmelCase_ : Optional[Any] = with_conta
            lowerCAmelCase_ : Optional[int] = mc_label
        lowerCAmelCase_ : Optional[Any] = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
    return tensor_datasets


def lowerCAmelCase ( ):
    # main(): CLI parsing, data preparation, optional training loop, optional
    # evaluation, and checkpoint save/reload.
    lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=SCREAMING_SNAKE_CASE_ , default='''openai-gpt''' , help='''pretrained model name''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
    parser.add_argument(
        '''--output_dir''' ,
        default=SCREAMING_SNAKE_CASE_ ,
        type=SCREAMING_SNAKE_CASE_ ,
        required=SCREAMING_SNAKE_CASE_ ,
        help='''The output directory where the model predictions and checkpoints will be written.''' ,
    )
    parser.add_argument('''--train_dataset''' , type=SCREAMING_SNAKE_CASE_ , default='''''' )
    parser.add_argument('''--eval_dataset''' , type=SCREAMING_SNAKE_CASE_ , default='''''' )
    parser.add_argument('''--seed''' , type=SCREAMING_SNAKE_CASE_ , default=42 )
    parser.add_argument('''--num_train_epochs''' , type=SCREAMING_SNAKE_CASE_ , default=3 )
    parser.add_argument('''--train_batch_size''' , type=SCREAMING_SNAKE_CASE_ , default=8 )
    parser.add_argument('''--eval_batch_size''' , type=SCREAMING_SNAKE_CASE_ , default=16 )
    parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' , type=SCREAMING_SNAKE_CASE_ , default=1 )
    parser.add_argument(
        '''--max_steps''' ,
        default=-1 ,
        type=SCREAMING_SNAKE_CASE_ ,
        help=(
            '''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
        ) ,
    )
    parser.add_argument(
        '''--gradient_accumulation_steps''' ,
        type=SCREAMING_SNAKE_CASE_ ,
        default=1 ,
        help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,
    )
    parser.add_argument('''--learning_rate''' , type=SCREAMING_SNAKE_CASE_ , default=6.2_5e-5 )
    parser.add_argument('''--warmup_steps''' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='''Linear warmup over warmup_steps.''' )
    parser.add_argument('''--lr_schedule''' , type=SCREAMING_SNAKE_CASE_ , default='''warmup_linear''' )
    parser.add_argument('''--weight_decay''' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
    parser.add_argument('''--lm_coef''' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
    parser.add_argument('''--n_valid''' , type=SCREAMING_SNAKE_CASE_ , default=374 )
    parser.add_argument('''--server_ip''' , type=SCREAMING_SNAKE_CASE_ , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=SCREAMING_SNAKE_CASE_ , default='''''' , help='''Can be used for distant debugging.''' )
    lowerCAmelCase_ : Optional[int] = parser.parse_args()
    print(SCREAMING_SNAKE_CASE_ )

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
        ptvsd.wait_for_attach()

    # Seed every RNG source for reproducibility.
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )

    lowerCAmelCase_ : Union[str, Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    lowerCAmelCase_ : List[str] = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )

    if not args.do_train and not args.do_eval:
        raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )

    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    lowerCAmelCase_ : Any = ["_start_", "_delimiter_", "_classify_"]
    lowerCAmelCase_ : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase_ : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
    model.to(SCREAMING_SNAKE_CASE_ )

    # Load and encode the datasets
    def tokenize_and_encode(lowerCAmelCase_ ):
        # Recursively tokenize strings, pass ints through, and map over
        # nested containers.
        if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
        elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            return obj
        return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]

    logger.info('''Encoding dataset...''' )
    lowerCAmelCase_ : Optional[Any] = load_rocstories_dataset(args.train_dataset )
    lowerCAmelCase_ : Union[str, Any] = load_rocstories_dataset(args.eval_dataset )
    lowerCAmelCase_ : Dict = (train_dataset, eval_dataset)
    lowerCAmelCase_ : Optional[Any] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )

    # Compute the max input length for the Transformer
    lowerCAmelCase_ : List[str] = model.config.n_positions // 2 - 2
    lowerCAmelCase_ : int = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset
    )
    lowerCAmelCase_ : str = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions )  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    lowerCAmelCase_ : Union[str, Any] = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase_ : List[Any] = tensor_datasets[0], tensor_datasets[1]
    lowerCAmelCase_ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase_ : Any = RandomSampler(SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase_ : str = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
    lowerCAmelCase_ : Any = TensorDataset(*SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase_ : Tuple = SequentialSampler(SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase_ : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            lowerCAmelCase_ : str = args.max_steps
            lowerCAmelCase_ : Any = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
        else:
            lowerCAmelCase_ : Any = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
        # No weight decay on biases and LayerNorm parameters (standard
        # transformer fine-tuning recipe).
        lowerCAmelCase_ : Optional[int] = list(model.named_parameters() )
        lowerCAmelCase_ : Any = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        lowerCAmelCase_ : Optional[Any] = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
        ]
        lowerCAmelCase_ : Union[str, Any] = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
        lowerCAmelCase_ : List[Any] = get_linear_schedule_with_warmup(
            SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )

    if args.do_train:
        lowerCAmelCase_ : Optional[int] = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
            lowerCAmelCase_ : List[Any] = 0
            lowerCAmelCase_ : int = 0
            lowerCAmelCase_ : int = tqdm(SCREAMING_SNAKE_CASE_ , desc='''Training''' )
            for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
                lowerCAmelCase_ : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
                lowerCAmelCase_ : Tuple = batch
                lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
                # Joint loss: weighted LM loss plus multiple-choice loss.
                lowerCAmelCase_ : Union[str, Any] = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                lowerCAmelCase_ : Optional[Any] = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                lowerCAmelCase_ : Optional[int] = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        lowerCAmelCase_ : Dict = model.module if hasattr(SCREAMING_SNAKE_CASE_ , '''module''' ) else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        lowerCAmelCase_ : List[Any] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : str = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
        torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
        model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
        tokenizer.save_vocabulary(args.output_dir )

        # Load a trained model and vocabulary that you have fine-tuned
        lowerCAmelCase_ : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        lowerCAmelCase_ : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(SCREAMING_SNAKE_CASE_ )

    if args.do_eval:
        model.eval()
        lowerCAmelCase_ : Optional[Any] = 0, 0
        lowerCAmelCase_ : Optional[int] = 0, 0
        for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc='''Evaluating''' ):
            lowerCAmelCase_ : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
            lowerCAmelCase_ : Optional[Any] = batch
            with torch.no_grad():
                lowerCAmelCase_ : Tuple = model(
                    SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase_ : Tuple = mc_logits.detach().cpu().numpy()
            lowerCAmelCase_ : List[Any] = mc_labels.to('''cpu''' ).numpy()
            lowerCAmelCase_ : Union[str, Any] = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        lowerCAmelCase_ : Optional[Any] = eval_loss / nb_eval_steps
        lowerCAmelCase_ : List[Any] = eval_accuracy / nb_eval_examples
        lowerCAmelCase_ : int = tr_loss / nb_tr_steps if args.do_train else None
        lowerCAmelCase_ : Tuple = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        lowerCAmelCase_ : Optional[Any] = os.path.join(args.output_dir , '''eval_results.txt''' )
        with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key in sorted(result.keys() ):
                logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
                writer.write('''%s = %s\n''' % (key, str(result[key] )) )


if __name__ == "__main__":
    main()
702
# Fast (HuggingFace `tokenizers`-backed) tokenizer for XLNet.
# NOTE(review): identifiers in this module are mangled — every constant is
# assigned to `_UpperCAmelCase`, so the names referenced later
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, logger, SPIECE_UNDERLINE,
# the base class `UpperCAmelCase__`, ...) are undefined as written; the
# original names need restoring before this file can run.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # sentencepiece missing: no slow-tokenizer class to convert from
    _UpperCAmelCase : Dict = None

_UpperCAmelCase : Tuple = logging.get_logger(__name__)

# Local file names expected next to a checkpoint, and hub download URLs.
_UpperCAmelCase : Any = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Any = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}
# No fixed positional-embedding size limit for either checkpoint.
_UpperCAmelCase : Dict = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# SentencePiece word-boundary marker character.
_UpperCAmelCase : Tuple = """▁"""

# Segments (not really needed)
_UpperCAmelCase : str = 0
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : int = 2
_UpperCAmelCase : Any = 3
_UpperCAmelCase : List[Any] = 4


class snake_case__( UpperCAmelCase__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left (its targets sit at the end of the sequence).
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase ,
            tokenizer_file=__lowercase ,
            do_lower_case=__lowercase ,
            remove_space=__lowercase ,
            keep_accents=__lowercase ,
            bos_token=__lowercase ,
            eos_token=__lowercase ,
            unk_token=__lowercase ,
            sep_token=__lowercase ,
            pad_token=__lowercase ,
            cls_token=__lowercase ,
            mask_token=__lowercase ,
            additional_special_tokens=__lowercase ,
            **__lowercase ,
        )
        # presumably _pad_token_type_id for the trailing <cls> segment — TODO confirm
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        # A slow tokenizer can only be re-saved if the sentencepiece file exists.
        lowerCAmelCase_ : str = False if not self.vocab_file else True

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        # build_inputs_with_special_tokens: XLNet appends specials at the END:
        #   single pair:  A <sep> <cls>      sequence pair:  A <sep> B <sep> <cls>
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        # create_token_type_ids_from_sequences: segment ids 0 (and 1 for the
        # second sequence) plus segment id 2 for the trailing <cls>.
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        # save_vocabulary: copy the sentencepiece model into save_directory,
        # skipping the copy when source and destination are the same file.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
619
0
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class snake_case__(Dataset):
    """Dataset of tokenized language-model sequences used for distillation.

    Holds one numpy array of token-id sequences and one of their lengths, and
    cleans the data on construction (splitting over-long sequences, dropping
    near-empty ones and ones dominated by the unknown token).

    NOTE(review): the obfuscated source named every method ``lowercase_`` and
    used an undefined base class, so ``self.check()`` etc. raised
    ``AttributeError``; method names are restored from the calls made in
    ``__init__`` and the base class from the ``Dataset`` import.
    """

    def __init__(self, params, data):
        # params must expose: max_model_input_size, mlm, special_tok_ids,
        # is_master (read by the cleanup/logging methods below).
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Consistency check: one length per sequence, lengths are accurate."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than ``max_model_input_size`` into chunks.

        Each chunk is re-framed with the cls/sep (or bos/eos) special tokens so
        every stored sequence still starts and ends with them.
        """
        max_len = self.params.max_model_input_size
        too_long = self.lengths > max_len
        logger.info(f"""Splitting {sum(too_long)} too long sequences.""")

        def divide_chunks(seq, chunk_size):
            return [seq[i : i + chunk_size] for i in range(0, len(seq), chunk_size)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room to re-add the two special tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(s) for s in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer (too short to be useful)."""
        init_size = len(self)
        indices = self.lengths > 1_1
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""")

    def remove_unknown_sequences(self):
        """Drop sequences where >= 50% of tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""")

    def print_statistics(self):
        """Log dataset size on the master process only."""
        if not self.params.is_master:
            return
        logger.info(f"""{len(self)} sequences""")

    def batch_sequences(self, batch):
        """Collate a list of (token_ids, length) pairs into padded tensors.

        Returns:
            (token_ids, lengths): LongTensors of shape (bs, max_seq_len) and (bs,).

        NOTE(review): the obfuscated source left this method unnamed
        (``lowercase_``); ``batch_sequences`` follows the upstream
        distillation code -- confirm against callers.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
703
import math import qiskit def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts: if ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) ): raise TypeError('''inputs must be integers.''' ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError('''inputs must be positive.''' ) if ( (math.floor(lowerCAmelCase_ ) != input_a) or (math.floor(lowerCAmelCase_ ) != input_a) or (math.floor(lowerCAmelCase_ ) != carry_in) ): raise ValueError('''inputs must be exact integers.''' ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError('''inputs must be less or equal to 2.''' ) # build registers lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' ) lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' ) # list the entries lowerCAmelCase_ : Any = [input_a, input_a, carry_in] lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' ) lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 ) return job.result().get_counts(lowerCAmelCase_ ) if __name__ == "__main__": print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
619
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
704
import re def lowerCAmelCase ( lowerCAmelCase_ )-> bool: lowerCAmelCase_ : Tuple = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' ) if match := re.search(lowerCAmelCase_ , lowerCAmelCase_ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
619
0
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case__(unittest.TestCase):
    """Nightly integration test for the legacy ONNX Stable Diffusion inpaint pipeline.

    NOTE(review): the obfuscated source referenced an undefined name ``_A``
    for every keyword value and named all three members ``lowercase_``; the
    property names are proven by the ``self.gpu_provider`` /
    ``self.gpu_options`` reads below, and the ``None`` keyword values are
    restored from the upstream test -- confirm before merging.
    """

    @property
    def gpu_provider(self):
        # CUDA execution provider with a capped 15GB memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the source set an unnamed option to False; upstream
        # this is enable_mem_pattern -- verify.
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=1_5,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image).max() < 1e-2
705
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

_UpperCAmelCase = logging.get_logger(__name__)


class snake_case__(BaseImageProcessor):
    """Image processor with BiT-style "crop pct" resizing.

    Optionally resizes (shortest-edge / crop-pct scheme), rescales and
    normalizes images, returning a ``BatchFeature`` of pixel values.

    NOTE(review): the obfuscated source named every method ``lowercase_`` and
    gave ``__init__`` duplicate parameter names (a SyntaxError); the method
    names ``resize``/``rescale``/``normalize`` are proven by the
    ``self.resize(...)`` etc. calls inside ``preprocess``, and the base class
    by the ``BaseImageProcessor`` import. ``model_input_names`` is restored
    from the transformers image-processor convention -- confirm.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_5_5,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 3_8_4}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image; below 384px, upsample by 1/crop_pct then center-crop."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        crop_pct: Optional[float] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Full pipeline: validate inputs, then resize/rescale/normalize each image."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
619
0
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class snake_case__(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for ``BlenderbotSmallTokenizer``.

    NOTE(review): the obfuscated source used an undefined base ``__a``, named
    every method ``lowercase_`` (so unittest never ran them and
    ``super().setUp()`` proved the setUp name), and used the class name as a
    local variable. Method/attribute names are restored from the
    TokenizerTesterMixin contract and the upstream test -- confirm.
    """

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1_3_8_4]
        src_text = "I am a small frog."
        # NOTE(review): the obfuscation erased these keyword values; restored
        # from the upstream test -- confirm padding/truncation flags.
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
706
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Optional[int] =logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] ={ """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""", } class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese""" def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) lowerCAmelCase_ : Optional[Any] = vocab_size lowerCAmelCase_ : Tuple = max_position_embeddings lowerCAmelCase_ : Optional[Any] = hidden_size lowerCAmelCase_ : Optional[Any] = num_hidden_layers lowerCAmelCase_ : str = num_attention_heads lowerCAmelCase_ : str = intermediate_multiple_size lowerCAmelCase_ : str = hidden_act lowerCAmelCase_ : Dict = rotary_pct lowerCAmelCase_ : Union[str, Any] = rotary_emb_base lowerCAmelCase_ : int = initializer_range lowerCAmelCase_ : Any = layer_norm_eps lowerCAmelCase_ : Optional[Any] = use_cache lowerCAmelCase_ : Tuple = attention_dropout lowerCAmelCase_ : Dict = hidden_dropout
619
0
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device

# NOTE(review): the obfuscated source assigned a bare module-level False here;
# upstream this disables TF32 matmuls -- confirm the intended target.
_UpperCAmelCase = False


class snake_case__(unittest.TestCase):
    """Placeholder for fast tests (intentionally empty in the source)."""

    pass


@nightly
@require_torch_gpu
class snake_case__(unittest.TestCase):
    """Nightly integration tests for the VersatileDiffusion mega pipeline.

    NOTE(review): the obfuscated source referenced an undefined name ``a_``
    everywhere and the nonexistent ``torch.floataa``; values are restored
    from context (``torch_device`` is imported, ``generator`` / ``init_image``
    are the locals in scope) and the upstream test -- confirm. Also note both
    classes in this module share the obfuscated name ``snake_case__``, so the
    second shadows the first (kept as-is for interface compatibility).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=5_0,
            output_type="numpy",
        ).images

        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=5_0, output_type="numpy"
        ).images

        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
707
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]: lowerCAmelCase_ : str = parent lowerCAmelCase_ : Optional[Any] = batch_size lowerCAmelCase_ : List[Any] = is_training lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss lowerCAmelCase_ : List[Any] = num_queries lowerCAmelCase_ : str = num_channels lowerCAmelCase_ : Dict = min_size lowerCAmelCase_ : List[str] = max_size lowerCAmelCase_ : Any = num_labels lowerCAmelCase_ : str = mask_feature_size def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowercase ) lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase ) lowerCAmelCase_ : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5 ).float() lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long() 
lowerCAmelCase_ : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> List[str]: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs() lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def lowercase_ ( self , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int: with torch.no_grad(): lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase ) model.to(__lowercase ) model.eval() lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, 
self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowercase , __lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase ) model.to(__lowercase ) model.eval() def comm_check_on_output(__lowercase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Any = model(__lowercase ) comm_check_on_output(__lowercase ) lowerCAmelCase_ : List[Any] = model( pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) comm_check_on_output(__lowercase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Tuple 
= ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Any = MaskFormerModelTester(self ) lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def lowercase_ ( self ) -> Any: self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def lowercase_ ( self ) -> str: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def lowercase_ ( self ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowerCAmelCase_ : Tuple = model_class(__lowercase ) lowerCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : str = [*signature.parameters.keys()] lowerCAmelCase_ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) @slow def lowercase_ ( self ) -> Optional[int]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2 lowerCAmelCase_ : List[Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(), } lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase ) lowerCAmelCase_ : Dict = model(**__lowercase ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase ) lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> List[str]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : int = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ 
, lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Optional[Any] = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss loss.backward() def lowercase_ ( self ) -> Optional[int]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : Any = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Any = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowercase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _UpperCAmelCase : Dict =1E-4 def lowerCAmelCase ( )-> Any: lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Union[str, Any]: return ( 
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase ) lowerCAmelCase_ : Dict = self.default_image_processor lowerCAmelCase_ : int = prepare_img() lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**__lowercase ) lowerCAmelCase_ : Union[str, Any] = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : List[Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : int = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : Tuple = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' 
).to(__lowercase ) lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : Dict = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Tuple = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : Dict = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : int = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size 
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : List[str] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : int = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Dict = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : str = self.default_image_processor lowerCAmelCase_ : Union[str, Any] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase ) lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']] lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']] with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) self.assertTrue(outputs.loss is not None )
619
0
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _UpperCAmelCase : Union[str, Any] ={"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class snake_case__( unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ : Optional[int] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: SCREAMING_SNAKE_CASE__ : str = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def lowercase_ ( self ) -> Optional[int]: lowerCAmelCase_ : Optional[int] = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' ) lowerCAmelCase_ : List[Any] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) lowerCAmelCase_ : Tuple = text_classifier('''This is great !''' , top_k=2 ) self.assertEqual( nested_simplify(__lowercase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}] ) lowerCAmelCase_ : Optional[int] = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}], [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, 
{'''label''': '''LABEL_1''', '''score''': 0.4_96}], ] , ) lowerCAmelCase_ : Optional[int] = text_classifier('''This is great !''' , top_k=1 ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) # Legacy behavior lowerCAmelCase_ : Union[str, Any] = text_classifier('''This is great !''' , return_all_scores=__lowercase ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) lowerCAmelCase_ : int = text_classifier('''This is great !''' , return_all_scores=__lowercase ) self.assertEqual( nested_simplify(__lowercase ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}]] ) lowerCAmelCase_ : Optional[int] = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__lowercase ) self.assertEqual( nested_simplify(__lowercase ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}], [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}], ] , ) lowerCAmelCase_ : List[Any] = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__lowercase ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_0''', '''score''': 0.5_04}, ] , ) @require_torch def lowercase_ ( self ) -> Optional[Any]: import torch lowerCAmelCase_ : Any = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , ) lowerCAmelCase_ : Optional[int] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) @require_tf def lowercase_ ( self ) -> int: lowerCAmelCase_ : Optional[int] = pipeline( task='''text-classification''' , 
model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' ) lowerCAmelCase_ : List[Any] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) @slow @require_torch def lowercase_ ( self ) -> str: lowerCAmelCase_ : Dict = pipeline('''text-classification''' ) lowerCAmelCase_ : int = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) lowerCAmelCase_ : Tuple = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) lowerCAmelCase_ : Dict = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_88}] ) @slow @require_tf def lowercase_ ( self ) -> int: lowerCAmelCase_ : Optional[Any] = pipeline('''text-classification''' , framework='''tf''' ) lowerCAmelCase_ : Dict = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) lowerCAmelCase_ : int = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) lowerCAmelCase_ : str = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_88}] ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Optional[int]: lowerCAmelCase_ : Tuple = TextClassificationPipeline(model=__lowercase , tokenizer=__lowercase ) return text_classifier, ["HuggingFace is in", "This is another test"] def lowercase_ ( self , __lowercase , __lowercase ) -> Dict: lowerCAmelCase_ : Optional[Any] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 lowerCAmelCase_ : 
List[str] = '''HuggingFace is in''' lowerCAmelCase_ : Union[str, Any] = text_classifier(__lowercase ) self.assertEqual(nested_simplify(__lowercase ) , [{'''label''': ANY(__lowercase ), '''score''': ANY(__lowercase )}] ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) lowerCAmelCase_ : str = ['''HuggingFace is in ''', '''Paris is in France'''] lowerCAmelCase_ : Optional[int] = text_classifier(__lowercase ) self.assertEqual( nested_simplify(__lowercase ) , [{'''label''': ANY(__lowercase ), '''score''': ANY(__lowercase )}, {'''label''': ANY(__lowercase ), '''score''': ANY(__lowercase )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format lowerCAmelCase_ : Any = text_classifier(__lowercase , top_k=__lowercase ) lowerCAmelCase_ : str = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__lowercase ) , [[{'''label''': ANY(__lowercase ), '''score''': ANY(__lowercase )}] * N, [{'''label''': ANY(__lowercase ), '''score''': ANY(__lowercase )}] * N] , ) lowerCAmelCase_ : Any = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''} lowerCAmelCase_ : List[Any] = text_classifier(__lowercase ) self.assertEqual( nested_simplify(__lowercase ) , {'''label''': ANY(__lowercase ), '''score''': ANY(__lowercase )} , ) self.assertTrue(outputs['''label'''] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
lowerCAmelCase_ : int = [['''HuggingFace is in ''', '''Paris is in France''']] with self.assertRaises(__lowercase ): text_classifier(__lowercase ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility lowerCAmelCase_ : List[Any] = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] ) self.assertEqual( nested_simplify(__lowercase ) , [{'''label''': ANY(__lowercase ), '''score''': ANY(__lowercase )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
708
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)  # task templates are immutable value objects
class snake_case__(TaskTemplate):
    """Task template mapping a dataset's columns onto the ASR schema (audio -> transcription)."""

    # Attribute names restored: the methods below read them, and the garbled
    # duplicates overwrote one another at class-creation time.
    # `task` is serialized even when left at its default so the template is
    # self-describing on disk.
    task: str = field(
        default="automatic-speech-recognition",
        metadata={"include_in_asdict_even_if_is_default": True},
    )
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features) -> "snake_case__":
        """Return a copy of the template whose input schema uses the dataset's own Audio feature.

        Raises:
            ValueError: if ``self.audio_column`` is missing from ``features`` or
                is not an ``Audio`` feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so write through __dict__ instead of setattr.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names to the canonical schema column names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
0
'''simple docstring'''
from collections import namedtuple


def lowerCAmelCase(voltage: float, current: float, power: float) -> tuple:
    """Solve P = V * I for whichever of the three quantities is given as 0.

    Exactly one argument must be 0 (the unknown); the other two carry the
    known values. Parameter and local names are restored here: the garbled
    version declared three parameters with the same name (a SyntaxError) while
    the body referenced ``voltage``/``current``/``power``/``result``.

    Returns:
        A ``result(name, value)`` namedtuple describing the solved quantity.

    Raises:
        ValueError: if not exactly one argument is 0, or if ``power`` is negative.
    """
    result = namedtuple("result", "name value")
    # Exactly one of the three quantities may be the unknown (given as 0).
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        # Power is reported as a magnitude, rounded to 2 decimal places.
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
709
# NOTE(review): every constant below is bound to the same name, so each
# assignment overwrites the previous one and only the final frozenset survives
# at import time. These look like the required/batch parameter sets for the
# different diffusers pipeline kinds (text-to-image, image variation,
# inpainting, unconditional, audio, spectrogram) — presumably each was meant
# to have a distinct name; verify against the callers before renaming.

# Text-guided image generation: full call parameters, then batch parameters.
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
# Unconditional image-to-image variation.
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
# Text-guided image-to-image.
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# Example-guided inpainting (e.g. PaintByExample).
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# Class-conditioned generation.
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# Unconditional generation: only a batch size is required.
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# Text-to-audio generation.
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# Token-conditioned generation (e.g. spectrogram diffusion).
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
619
0
import numpy as np


def lowerCAmelCase(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation, applied element-wise.

    Returns ``x`` where ``x > 0`` and ``alpha * (exp(x) - 1)`` elsewhere.
    Parameter names are restored: the garbled version declared two parameters
    with the same name (a SyntaxError) and referenced undefined ``A_``.

    Args:
        vector: input array of activations.
        alpha: scale of the negative saturation branch.

    Returns:
        Array of the same shape with ELU applied.
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
710
def lowerCAmelCase(lowerCAmelCase_=1_000_000) -> int:
    """Project Euler 14: starting number below ``lowerCAmelCase_`` with the longest Collatz chain.

    Chain lengths count every term including the final 1. Lengths of
    previously-seen starting values are memoized so shared chain tails are
    not re-walked. Fixes the garbled original, which compared against an
    undefined ``pre_counter`` and whose ``__main__`` guard called an
    undefined ``solution``.

    Returns:
        The starting number (>= 1) producing the longest chain.
    """
    chain_lengths = {1: 1}
    best_start = 1
    best_length = 1
    for start in range(2, lowerCAmelCase_):
        steps = 0
        current = start
        # Walk the chain until we hit a number whose length is already known.
        while current not in chain_lengths:
            current = 3 * current + 1 if current % 2 else current // 2
            steps += 1
        total = steps + chain_lengths[current]
        chain_lengths.setdefault(start, total)
        # Strict '>' keeps the first (smallest) start on ties.
        if total > best_length:
            best_start = start
            best_length = total
    return best_start


if __name__ == "__main__":
    # Bug fix: the guard previously called an undefined name `solution`.
    print(lowerCAmelCase(int(input().strip())))
619
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class snake_case__(PipelineTool):
    """Image-captioning tool backed by BLIP; returns an English description of an image.

    Fixes to the garbled original: the class inherited itself instead of
    ``PipelineTool``, all configuration attributes shared one name (so only
    the last survived), the three protocol methods shared one name, and the
    method bodies referenced an undefined ``_A``.
    """

    # Attribute names restored so `PipelineTool` can discover the tool's
    # configuration by name.
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Pillow is needed to load and handle the input image.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Pre-process: turn the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run caption generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Post-process: decode the generated token ids into a caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
711
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Composite configuration for a generic encoder-decoder model.

    Must be initialized with an ``encoder`` and a ``decoder`` sub-config
    (as dicts containing a ``model_type`` key); the corresponding concrete
    config classes are instantiated via ``AutoConfig.for_model``.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported here to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Build a composite config from two existing configs, marking the decoder as such."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs to dicts."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
619
0
"""Send a message to a Slack channel through an incoming-webhook URL."""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST ``message_body`` as JSON to the Slack webhook at ``slack_url``.

    Raises:
        ValueError: if Slack answers with any status code other than 200.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
712
from __future__ import annotations

from math import pi


def ind_reactance(
    inductance: float, frequency: float, reactance: float
) -> dict[str, float]:
    """Solve X_L = 2*pi*f*L for the single unknown among the three arguments.

    Exactly one argument must be 0 — that is the quantity to compute; the
    result is returned as a one-entry dict keyed by the unknown's name.

    :param inductance: inductance L in henries (0 if unknown).
    :param frequency: frequency f in hertz (0 if unknown).
    :param reactance: inductive reactance X_L in ohms (0 if unknown).
    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Detect objects described by free-text candidate labels in an image.

    One model forward pass is run per candidate label (hence ChunkPipeline);
    the per-label detections are merged and sorted by score in postprocess.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        """Run detection on `image` for the given `candidate_labels`.

        `text_queries` is accepted as a legacy alias for `candidate_labels`.
        `image` may also be a list of {"image": ..., "candidate_labels": ...} dicts.
        """
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        # One chunk per candidate label; "is_last" tells the pipeline when to stop accumulating.
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        # Pop bookkeeping keys so only real model inputs are forwarded.
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn a (4,) xyxy tensor into an {xmin, ymin, xmax, ymax} dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
713
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names of the learning-rate schedules that `get_scheduler` can build."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Constant learning rate: the multiplier is always 1."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Linear warmup from 0 to the initial lr, then constant."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant multiplier described by a rule string.

    ``step_rules`` looks like ``"1:10,0.1:20,0.01:30,0.005"``: multiplier 1
    before step 10, 0.1 before step 20, 0.01 before step 30, then 0.005.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    # Closure factory so the rule table is bound at creation time.
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the initial lr, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup, then cosine decay over `num_cycles` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build any supported scheduler from its name.

    Raises:
        ValueError: if the chosen schedule needs `num_warmup_steps` or
            `num_training_steps` and the argument was not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
619
0
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy fairseq XLM-RoBERTa-XL weights into the transformers layout, verify
    both models produce matching outputs, then save the converted checkpoint.

    NOTE(review): the assignment TARGETS below were erased by obfuscation in the
    incoming file; they were reconstructed to match the upstream transformers
    conversion script — verify against it before relying on this converter.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
714
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve the mass-action law n_i^2 = n * p for the single unknown.

    Exactly one of the three concentrations must be 0 (the unknown); the
    result is ``(name_of_unknown, value)``.

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor"
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    # Helper so dataclass fields can safely default to a (mutable) list.
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Assert that two argparse parsers define equivalent actions."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class WithLiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(WithLiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # NOTE(review): the incoming file calls parse_yaml_file on the .json
            # path here (YAML is a superset of JSON, so it works) — preserved.
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
715
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are exempt from the "docstring must mention a checkpoint" rule.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name in `config_class`'s docstring whose markdown
    link matches `https://huggingface.co/<name>` (trailing `/` tolerated), or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-deprecated, non-exempt config class whose
    docstring does not contain a valid checkpoint reference."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
0
def mf_knapsack(i, wt, val, j):
    """Memoization-based 0/1 knapsack.

    Returns the best value achievable with the first `i` items (weights `wt`,
    values `val`) and capacity `j`, caching results in the global table `f`.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:  # -1 marks "not computed yet"
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        # Bug fix: the cache must actually be written, otherwise every call
        # recomputes and returns the stale -1 sentinel.
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    :param w: knapsack capacity
    :param wt: item weights
    :param val: item values
    :param n: number of items
    :return: (optimal value, full DP table) where dp[i][c] is the best value
             using the first i items with capacity c.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    # Bug fix: index with `w`, not the loop variable `w_` (which is undefined
    # when w == 0 and only incidentally equals w otherwise).
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal item subset.

    :return: (optimal value, set of 1-based item indices achieving it)
    :raises ValueError: if wt/val are not sequences or have mismatched lengths
    :raises TypeError: if any weight is not an integer
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the DP table backwards, adding each taken item's 1-based index to
    `optimal_set` (an item was taken iff the value changed between rows)."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # item i was not taken
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            # item i was taken: record it and drop its weight from the capacity
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
716
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class snake_case__:
    """Wrapper around a torch learning-rate scheduler that only steps it when the
    wrapped optimizer(s) actually performed a step.

    Args:
        scheduler: the torch LR scheduler to wrap.
        optimizers: a single optimizer or a list/tuple of optimizers tied to it.
        step_with_optimizer (bool, default True): if False, the scheduler is
            stepped unconditionally, with no link to the optimizers.
        split_batches (bool, default False): if True, batches are split across
            processes so one scheduler step corresponds to one training step;
            otherwise one step is taken per process.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                # Keep the internal step counter consistent even though we skip the step.
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs delegating to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
619
0
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class snake_case__(TestCase):
    """Unit tests for `Dataset.from_list`."""

    def _create_example_records(self):
        # Four well-formed records sharing the same two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        # Same data as `_create_example_records`, built column-wise.
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        # Building row-wise and column-wise must yield identical dataset info.
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
717
from manim import *


class snake_case__(Scene):
    """Manim animation: model weights being offloaded from a loaded checkpoint
    to disk/CPU memmaps, then the checkpoint being garbage collected.

    NOTE(review): variable names and layout directions (UP/RIGHT/DOWN) were
    destroyed by obfuscation in the original; they are reconstructed here from
    the visible call structure — verify against the rendered animation.
    """

    def construct(self):
        # Basic building blocks: a memory cell, a smaller "meta" cell, and a fill.
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU: two 6-cell columns.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a 4-cell column.
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # Model: a 6-cell row.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        # Mark each model cell as "empty" (yellow) and place a tiny CPU-side
        # counterpart: the first next to the CPU corner, the rest chained.
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_meta_arr, *model_cpu_arr)

        # Loaded checkpoint: a 6-cell row above the model.
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        # Fill the checkpoint cells (blue) and mirror each into a CPU cell.
        ckpt_arr = []
        ckpt_cpu_arr = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        # Color-coded legend.
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_a = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_a.move_to([2, 2, 0])

        # Disk: two 6-cell columns of meta (small) cells.
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_a, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        # Shrink each CPU-side checkpoint cell onto the disk.
        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_a))
        step_a = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=3))
        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
619
0
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over `result` and write them (and, optionally, every
    prediction/target pair) to text files named after the dataset."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lowercase `text`, strip the punctuation ignored during training, and
    collapse newlines / repeated spaces to single spaces."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        # Bug fix: the prediction and normalized target must be stored on the
        # batch — the original discarded them, so `log_results` saw no columns.
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
718
_UpperCAmelCase : Dict =[ (1000, """M"""), (900, """CM"""), (500, """D"""), (400, """CD"""), (100, """C"""), (90, """XC"""), (50, """L"""), (40, """XL"""), (10, """X"""), (9, """IX"""), (5, """V"""), (4, """IV"""), (1, """I"""), ] def lowerCAmelCase ( lowerCAmelCase_ )-> int: lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000} lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : List[str] = 0 while place < len(lowerCAmelCase_ ): if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowerCAmelCase ( lowerCAmelCase_ )-> str: lowerCAmelCase_ : List[Any] = [] for arabic, roman in ROMAN: ((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ ) result.append(roman * factor ) if number == 0: break return "".join(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
619
0
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]: if isinstance(lowerCAmelCase_ , collections.abc.Iterable ): return x return (x, x) @require_flax class snake_case__: '''simple docstring''' def lowercase_ ( self , __lowercase , __lowercase ) -> Optional[Any]: pass def lowercase_ ( self ) -> List[str]: pass def lowercase_ ( self ) -> int: pass def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Tuple: lowerCAmelCase_ : Optional[int] = np.abs((a - b) ).max() self.assertLessEqual(snake_case__ , snake_case__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> Any: lowerCAmelCase_ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ ) lowerCAmelCase_ : Any = FlaxVisionTextDualEncoderModel(snake_case__ ) lowerCAmelCase_ : List[str] = 
model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> Union[str, Any]: lowerCAmelCase_ : Any = self.get_vision_text_model(snake_case__ , snake_case__ ) lowerCAmelCase_ : Any = {"vision_model": vision_model, "text_model": text_model} lowerCAmelCase_ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ ) lowerCAmelCase_ : int = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> Optional[int]: lowerCAmelCase_ : Union[str, Any] = self.get_vision_text_model(snake_case__ , snake_case__ ) lowerCAmelCase_ : int = {"vision_model": vision_model, "text_model": text_model} lowerCAmelCase_ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ ) lowerCAmelCase_ : Optional[int] = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ ) lowerCAmelCase_ : int = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case__ ) lowerCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ ) lowerCAmelCase_ : Union[str, Any] = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ ) lowerCAmelCase_ : Optional[Any] = after_output[0] lowerCAmelCase_ : List[str] = np.amax(np.abs(out_a - out_a ) ) 
self.assertLessEqual(snake_case__ , 1e-3 ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> Any: lowerCAmelCase_ : Union[str, Any] = self.get_vision_text_model(snake_case__ , snake_case__ ) lowerCAmelCase_ : Tuple = {"vision_model": vision_model, "text_model": text_model} lowerCAmelCase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ ) lowerCAmelCase_ : Union[str, Any] = model( input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , output_attentions=snake_case__ ) lowerCAmelCase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(snake_case__ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase_ : List[Any] = to_atuple(vision_model.config.image_size ) lowerCAmelCase_ : Dict = to_atuple(vision_model.config.patch_size ) lowerCAmelCase_ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase_ : List[str] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase_ : int = output.text_model_output.attentions self.assertEqual(len(snake_case__ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> int: pt_model.to(snake_case__ ) pt_model.eval() # prepare inputs lowerCAmelCase_ : Dict = inputs_dict lowerCAmelCase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowerCAmelCase_ : Union[str, Any] = pt_model(**snake_case__ ).to_tuple() lowerCAmelCase_ : Optional[int] = fx_model(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and 
PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(snake_case__ ) lowerCAmelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ , from_pt=snake_case__ ) lowerCAmelCase_ : Union[str, Any] = fx_model_loaded(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(snake_case__ ) lowerCAmelCase_ : Optional[Any] = VisionTextDualEncoderModel.from_pretrained(snake_case__ , from_flax=snake_case__ ) pt_model_loaded.to(snake_case__ ) pt_model_loaded.eval() with torch.no_grad(): lowerCAmelCase_ : List[str] = pt_model_loaded(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(snake_case__ , pt_output_loaded.numpy() , 4e-2 ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Tuple: lowerCAmelCase_ : str = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ ) lowerCAmelCase_ : Tuple = VisionTextDualEncoderModel(snake_case__ ) lowerCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel(snake_case__ ) lowerCAmelCase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ ) lowerCAmelCase_ : Optional[int] = fx_state self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> int: 
lowerCAmelCase_ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ ) lowerCAmelCase_ : Dict = VisionTextDualEncoderModel(snake_case__ ) lowerCAmelCase_ : Optional[Any] = FlaxVisionTextDualEncoderModel(snake_case__ ) lowerCAmelCase_ : Tuple = load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params ) self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : int = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**snake_case__ ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Any = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**snake_case__ ) def lowercase_ ( self ) -> Tuple: lowerCAmelCase_ : int = self.prepare_config_and_inputs() self.check_save_load(**snake_case__ ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**snake_case__ ) @is_pt_flax_cross_test def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs() lowerCAmelCase_ : Dict = config_inputs_dict.pop('''vision_config''' ) lowerCAmelCase_ : List[str] = config_inputs_dict.pop('''text_config''' ) lowerCAmelCase_ : Optional[int] = config_inputs_dict self.check_equivalence_pt_to_flax(snake_case__ , snake_case__ , snake_case__ ) self.check_equivalence_flax_to_pt(snake_case__ , snake_case__ , snake_case__ ) @slow def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Optional[int] = self.get_pretrained_model_and_inputs() lowerCAmelCase_ : Optional[Any] = model_a(**snake_case__ ) lowerCAmelCase_ : List[str] = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(snake_case__ ) lowerCAmelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ ) lowerCAmelCase_ : Union[str, Any] = model_a(**snake_case__ ) lowerCAmelCase_ : 
Optional[Any] = after_outputs[0] lowerCAmelCase_ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(snake_case__ , 1e-5 ) @require_flax class snake_case__( SCREAMING_SNAKE_CASE__, unittest.TestCase ): '''simple docstring''' def lowercase_ ( self ) -> str: lowerCAmelCase_ : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , ) lowerCAmelCase_ : Tuple = 1_3 lowerCAmelCase_ : List[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCAmelCase_ : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowerCAmelCase_ : List[str] = random_attention_mask([batch_size, 4] ) lowerCAmelCase_ : List[str] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def lowercase_ ( self , __lowercase , __lowercase ) -> str: lowerCAmelCase_ : Union[str, Any] = FlaxViTModel(snake_case__ ) lowerCAmelCase_ : Optional[Any] = FlaxBertModel(snake_case__ ) return vision_model, text_model def lowercase_ ( self ) -> str: lowerCAmelCase_ : Any = FlaxViTModelTester(self ) lowerCAmelCase_ : List[str] = FlaxBertModelTester(self ) lowerCAmelCase_ : Tuple = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase_ : List[str] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase_ : str = vision_config_and_inputs lowerCAmelCase_ : str = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class snake_case__( SCREAMING_SNAKE_CASE__, unittest.TestCase ): '''simple docstring''' def lowercase_ ( self ) 
-> List[Any]: lowerCAmelCase_ : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , ) lowerCAmelCase_ : Tuple = 1_3 lowerCAmelCase_ : Any = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCAmelCase_ : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowerCAmelCase_ : List[Any] = random_attention_mask([batch_size, 4] ) lowerCAmelCase_ : Tuple = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def lowercase_ ( self , __lowercase , __lowercase ) -> Union[str, Any]: lowerCAmelCase_ : Any = FlaxCLIPVisionModel(snake_case__ ) lowerCAmelCase_ : Optional[int] = FlaxBertModel(snake_case__ ) return vision_model, text_model def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : List[Any] = FlaxCLIPVisionModelTester(self ) lowerCAmelCase_ : Union[str, Any] = FlaxBertModelTester(self ) lowerCAmelCase_ : Any = clip_model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Optional[int] = vision_config_and_inputs lowerCAmelCase_ : Dict = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class snake_case__( unittest.TestCase ): '''simple docstring''' @slow def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) lowerCAmelCase_ : Tuple = 
VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) lowerCAmelCase_ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase_ : int = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=snake_case__ , padding=snake_case__ , return_tensors='''np''' ) lowerCAmelCase_ : Any = model(**snake_case__ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowerCAmelCase_ : Tuple = np.array([[1.2_28_47_27, 0.3_10_41_22]] ) self.assertTrue(np.allclose(outputs.logits_per_image , snake_case__ , atol=1e-3 ) )
719
import csv

import tweepy

# Twitter API credentials.
# NOTE(review): the mangled source collapsed all four values into a single
# rebound name, so OAuth received the wrong values; one name per credential.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download the most recent tweets of *screen_name* (up to the API cap)
    and write them to ``new_<screen_name>_tweets.csv``.

    Columns written: id, created_at, text.
    Fixes the original NameError: ``screen_name`` was referenced but the
    parameter carried an obfuscated name.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"""getting tweets before {oldest}""")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest
        )
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"""...{len(alltweets)} tweets downloaded so far""")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
619
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging

_UpperCAmelCase : int = logging.get_logger(__name__)

_UpperCAmelCase : int = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class snake_case__(PretrainedConfig):
    """Configuration for Open-Llama models.

    Restores the mangled original, which could not compile (every ``__init__``
    parameter shared one name) and which referenced the undefined base class
    ``__lowerCamelCase`` and the undefined sentinel ``SCREAMING_SNAKE_CASE_``.
    Parameter names/defaults follow the upstream ``OpenLlamaConfig``.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Accept the historically misspelled kwarg as an override of the
        # correctly spelled parameter (the runtime key is kept byte-for-byte).
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate ``rope_scaling``: a 2-field dict with ``type`` in
        {"linear", "dynamic"} and a float ``factor`` > 1, or None.

        Renamed from the mangled ``lowercase_`` so the call in ``__init__``
        actually resolves.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"""got {self.rope_scaling}"""
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"""
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
720
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff *number* (>= 0) is a prime number."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: all primes from 2 up to *n* (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    begin_list = list(range(2, n + 1))
    ans = []
    # Zero out every composite (multiple of a surviving entry).
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """All primes from 2 up to *n* (n > 2), via trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Prime factorization of *number* (>= 0), smallest factor first."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []
    factor = 2  # smallest potential prime factor
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Largest prime factor of *number* (>= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = max(prime_factorization(number))
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Smallest prime factor of *number* (>= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = min(prime_factorization(number))
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """True iff *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """True iff *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two primes summing to an even *number* > 2 (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    i = 0
    j = None
    loop = True  # exit flag for both loops once a pair is found
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor of two non-negative ints (Euclid)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two positive ints, via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # primes already multiplied into the answer
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """The n-th prime, 0-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """All primes strictly between two primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []
    # advance to the next prime after p_number_1
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """All divisors of *n* (>= 1), including 1 and n."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """True iff *number* (> 1) equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce numerator/denominator by their gcd; returns the reduced pair."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """The n-th Fibonacci number with fib(1) == 1, fib(2) == 2 (this
    library's historical indexing)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans


# The mangled source rebound every function above to the single name
# 'lowerCAmelCase', so only the LAST definition (fib) survived as the public
# module binding; preserve that observable binding.
lowerCAmelCase = fib
619
0
from __future__ import annotations

import requests

# Post fields reddit's listing endpoint may return; anything else is rejected.
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from ``https://reddit.com/r/{subreddit}/{age}.json``.

    :param subreddit: subreddit name to query
    :param limit: number of posts to fetch
    :param age: listing order — "new", "top", or "hot"
    :param wanted_data: post fields to keep; when empty, raw child entries are
        returned. Each field must be a member of ``valid_terms``.
    :raises ValueError: if ``wanted_data`` contains an unknown field
    :raises requests.HTTPError: on HTTP 429 (rate limited)
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        raise ValueError(f"Invalid search term: {invalid_search_terms}")
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        # keep only the requested fields for each post
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
721
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of array[left..right] (both bounds inclusive).

    Returns the index of ``target`` or -1 when absent.
    """
    for i in range(left, right + 1):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted ``array``.

    Returns an index of ``target`` or -1. Falls back to linear search once the
    window is narrower than ``precision``. Empty input yields -1.
    """
    left = 0
    right = len(array) - 1  # inclusive upper bound (was len(array): IndexError risk)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        # Split the *current* window into thirds. The original computed
        # (left + right) // 3 + 1, which leaves the window once left grows,
        # eventually indexing past the end of the array.
        third = (right - left) // 3
        one_third = left + third
        two_third = right - third

        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left..right] (both inclusive).

    Returns an index of ``target`` or -1. Unlike the original, a window of a
    single element (left == right) is still searched via ``lin_search``.
    """
    if left > right:
        return -1
    if right - left < precision:
        return lin_search(left, right, array, target)

    third = (right - left) // 3
    one_third = left + third
    two_third = right - third

    if array[one_third] == target:
        return one_third
    if array[two_third] == target:
        return two_third
    if target < array[one_third]:
        return rec_ternary_search(left, one_third - 1, array, target)
    if array[two_third] < target:
        return rec_ternary_search(two_third + 1, right, array, target)
    return rec_ternary_search(one_third + 1, two_third - 1, array, target)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
619
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration for BigBird models.

    The obfuscated original assigned every value to a throwaway local and
    passed undefined names to ``super().__init__``; the attribute assignments
    below restore the intended ``self.X = X`` form.
    """

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    """ONNX export configuration for BigBird."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra "choice" axis
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )


# Backward-compat alias: the obfuscated module bound this name to the last class.
snake_case__ = BigBirdOnnxConfig
700
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule -> exported names; consumed by _LazyModule below. The obfuscated
# original rebound a throwaway variable instead of extending this dict, so
# _LazyModule received an undefined name.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
0
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """Assemble a RAG checkpoint from a generator and a question encoder.

    Saves the combined model plus both tokenizers under ``dest_dir``. Tokenizer
    identifiers default to the corresponding model identifiers; the config
    defaults to the matching facebook/rag-*-base config for ``model_type``.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    # The obfuscated original dropped these attribute assignments, leaving the
    # sub-configs unused.
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
701
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


# NOTE(review): the obfuscated source only showed a bare ``= False`` module
# assignment; restored to the tf32 toggle used by diffusers tests — confirm.
torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    """CPU smoke tests for VQDiffusionPipeline with tiny dummy components.

    Method names are restored from the ``self.<name>`` references in the
    bodies; the obfuscated original named every method ``lowercase_`` so the
    class could not run.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)

        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        # looser tolerance preserved from the original first assertion
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
619
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule -> exported names; consumed by _LazyModule below. The obfuscated
# original rebound a throwaway variable instead of extending this dict, so
# _LazyModule received an undefined name.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer backed by a ``tokenizers`` model.

    Class attributes are restored from the obfuscated original, which assigned
    them all to one colliding name and inherited from an undefined base.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],  # noqa: B006 — kept from upstream API
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for sequence A, 1 for sequence B, 2 for <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)


# Backward-compat alias for the obfuscated class name.
snake_case__ = XLNetTokenizerFast
619
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Submodule -> exported names; consumed by _LazyModule below. The obfuscated
# original rebound a throwaway variable instead of extending this dict, so
# _LazyModule received an undefined name.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
703
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder on the aer simulator.

    Inputs may be 0, 1, or 2 (2 puts the qubit into superposition via a
    Hadamard gate). Returns the measured counts of the (sum, carry-out) bits.

    :raises TypeError: on string inputs
    :raises ValueError: on negative, non-integral, or >2 inputs
    """
    # The obfuscated original compared each input against itself and passed a
    # circuit object where a qubit index belongs; restored below.
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
619
0
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    """Deprecated alias of :class:`DPTImageProcessor`.

    The obfuscated original inherited from its own (undefined) class name,
    gave ``__init__`` two star-parameters with the same name (SyntaxError),
    and passed an undefined name as the warning category; all restored here.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


# Backward-compat alias for the obfuscated class name.
snake_case__ = DPTFeatureExtractor
704
import re


def indian_phone_validator(phone: str) -> bool:
    """Return True iff ``phone`` is a valid Indian mobile number.

    Accepts an optional ``+91`` prefix (with optional ``-`` or space), an
    optional leading ``0`` or ``91``, then a 10-digit number starting 7/8/9.
    The whole string must match (no trailing garbage).
    """
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        # search only guarantees a hit somewhere; equality with the full
        # string enforces an exact match.
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
619
0
# Tests for the ONNX Stable Diffusion img2img pipeline: fast CPU smoke tests
# against a tiny checkpoint, plus nightly GPU integration tests.
#
# NOTE(review): identifiers such as `_UpperCAmelCase`, `snake_case__`,
# `lowercase_`, `lowerCAmelCase_` and `A_` appear machine-mangled; `A_`,
# `_UpperCAmelCase`, `image`, `generator`, `inputs`, `pipe`, `image_slice`,
# `expected_slice`, `options` and `output` are referenced but never bound under
# those names in this file.  Code is reproduced verbatim — confirm against the
# original source before executing.
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImgaImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class snake_case__( _UpperCAmelCase, unittest.TestCase ):
    """Fast CPU-only checks of the ONNX img2img pipeline on a tiny test model."""

    # Hub id of the tiny checkpoint every fast test loads.
    SCREAMING_SNAKE_CASE__ : Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''

    def lowercase_ ( self , __lowercase=0 ) -> int:
        """Build deterministic dummy call kwargs (seeded image + numpy RNG)."""
        lowerCAmelCase_ : List[str] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(A_ ) )
        lowerCAmelCase_ : Optional[int] = np.random.RandomState(A_ )
        lowerCAmelCase_ : List[Any] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowercase_ ( self ) -> List[Any]:
        """Pipeline with its default scheduler reproduces a pinned image slice."""
        lowerCAmelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=A_ )
        lowerCAmelCase_ : Any = self.get_dummy_inputs()
        lowerCAmelCase_ : Optional[Any] = pipe(**A_ ).images
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        # Reference pixels recorded from a known-good run; loose 1e-1 tolerance.
        lowerCAmelCase_ : Dict = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1

    def lowercase_ ( self ) -> Optional[int]:
        """Same check with an explicitly configured PNDM scheduler."""
        lowerCAmelCase_ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
        pipe.set_progress_bar_config(disable=A_ )
        lowerCAmelCase_ : Optional[Any] = self.get_dummy_inputs()
        lowerCAmelCase_ : Any = pipe(**A_ ).images
        lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase_ : Tuple = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def lowercase_ ( self ) -> str:
        """Same check with the LMS discrete scheduler."""
        lowerCAmelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase_ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=A_ )
        # warmup pass to apply optimizations
        lowerCAmelCase_ : Any = pipe(**self.get_dummy_inputs() )
        lowerCAmelCase_ : str = self.get_dummy_inputs()
        lowerCAmelCase_ : Optional[int] = pipe(**A_ ).images
        lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase_ : List[Any] = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def lowercase_ ( self ) -> List[str]:
        """Same check with the Euler discrete scheduler."""
        lowerCAmelCase_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase_ : str = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=A_ )
        lowerCAmelCase_ : List[str] = self.get_dummy_inputs()
        lowerCAmelCase_ : Dict = pipe(**A_ ).images
        lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase_ : Optional[int] = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def lowercase_ ( self ) -> Union[str, Any]:
        """Same check with the Euler-ancestral scheduler."""
        lowerCAmelCase_ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase_ : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=A_ )
        lowerCAmelCase_ : Dict = self.get_dummy_inputs()
        lowerCAmelCase_ : List[str] = pipe(**A_ ).images
        lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase_ : Optional[int] = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

    def lowercase_ ( self ) -> Any:
        """Same check with the DPM-Solver multistep scheduler."""
        lowerCAmelCase_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        lowerCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=A_ )
        lowerCAmelCase_ : List[Any] = self.get_dummy_inputs()
        lowerCAmelCase_ : Tuple = pipe(**A_ ).images
        lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Nightly GPU integration tests against full Stable Diffusion checkpoints."""

    @property
    def lowercase_ ( self ) -> int:
        # onnxruntime CUDA provider spec with an explicit GPU memory cap.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        # Session options with memory-pattern optimization toggled off.
        lowerCAmelCase_ : List[str] = ort.SessionOptions()
        lowerCAmelCase_ : List[Any] = False
        return options

    def lowercase_ ( self ) -> Union[str, Any]:
        """End-to-end img2img run on SD v1.4 (default PNDM scheduler)."""
        lowerCAmelCase_ : Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCAmelCase_ : Any = init_image.resize((7_6_8, 5_1_2) )
        # using the PNDM scheduler by default
        lowerCAmelCase_ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=A_ )
        lowerCAmelCase_ : str = '''A fantasy landscape, trending on artstation'''
        lowerCAmelCase_ : List[Any] = np.random.RandomState(0 )
        lowerCAmelCase_ : Union[str, Any] = pipe(
            prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=A_ , output_type='''np''' , )
        lowerCAmelCase_ : List[Any] = output.images
        lowerCAmelCase_ : Union[str, Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def lowercase_ ( self ) -> Optional[int]:
        """End-to-end img2img run on SD v1.5 with an LMS scheduler."""
        lowerCAmelCase_ : List[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCAmelCase_ : Dict = init_image.resize((7_6_8, 5_1_2) )
        lowerCAmelCase_ : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        lowerCAmelCase_ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=A_ )
        lowerCAmelCase_ : str = '''A fantasy landscape, trending on artstation'''
        lowerCAmelCase_ : List[str] = np.random.RandomState(0 )
        lowerCAmelCase_ : Tuple = pipe(
            prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=A_ , output_type='''np''' , )
        lowerCAmelCase_ : Tuple = output.images
        lowerCAmelCase_ : int = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        lowerCAmelCase_ : List[Any] = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
705
# Image processor with a ConvNeXT-style resize policy: below the 384 threshold,
# images are resized via crop_pct then center-cropped; at 384 or above they are
# warped straight to a square.
#
# NOTE(review): identifiers are machine-mangled.  Every parameter is named
# `__lowercase` (duplicate argument names are a SyntaxError in Python) and the
# bodies read names (`size`, `do_resize`, `crop_pct`, `images`, …) that are
# never bound under those names here.  Code reproduced verbatim — confirm
# against the original source before executing.
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


_UpperCAmelCase : Any =logging.get_logger(__name__)


class snake_case__( UpperCAmelCase__ ):
    """Image processor applying resize / rescale / normalize with a crop_pct policy."""

    # Model-input key produced by preprocess().
    SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]

    def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
        """Store per-step defaults (do_resize/size/crop_pct/resample/rescale/normalize)."""
        super().__init__(**__lowercase )
        lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : List[Any] = do_resize
        lowerCAmelCase_ : Optional[int] = size
        # Default value set here for backwards compatibility where the value in config is None
        lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        lowerCAmelCase_ : Tuple = resample
        lowerCAmelCase_ : Optional[int] = do_rescale
        lowerCAmelCase_ : Any = rescale_factor
        lowerCAmelCase_ : List[str] = do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
        """Resize one image; below 384 use crop_pct + center crop, else warp to square."""
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
            lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
            lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
        """Scale pixel values by a constant factor (thin wrapper over rescale())."""
        return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
        """Channel-wise normalize with the given mean/std (thin wrapper over normalize())."""
        return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )

    def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
        """Full pipeline: validate, resize, rescale, normalize, and batch images."""
        lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        lowerCAmelCase_ : str = resample if resample is not None else self.resample
        lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
        lowerCAmelCase_ : int = size if size is not None else self.size
        lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
        if not valid_images(__lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): `and` binds tighter than `or`, so this parses as
        # `(do_resize and size is None) or resample is None` — the intended
        # grouping is presumably `do_resize and (size is None or resample is None)`.
        # Confirm before changing; reproduced as-is.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
        if do_resize:
            lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
        if do_rescale:
            lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
        if do_normalize:
            lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
        lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
        lowerCAmelCase_ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase )
619
0
# Shared filesystem / Hub constants for the diffusers library.
#
# NOTE(review): obfuscation collapsed every distinct constant name into
# `_UpperCAmelCase`, so each assignment below overwrites the previous one and
# `default_cache_path` is referenced but never defined here.  Judging from the
# values these were originally distinct names (cache path, weight filenames,
# hub URL, module cache, deprecated revision args) — recover from the original
# source before use.  Code reproduced verbatim.
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


_UpperCAmelCase =HUGGINGFACE_HUB_CACHE          # default on-disk cache directory
_UpperCAmelCase ='config.json'                  # model/pipeline config filename
_UpperCAmelCase ='diffusion_pytorch_model.bin'  # PyTorch weights filename
_UpperCAmelCase ='diffusion_flax_model.msgpack' # Flax weights filename
_UpperCAmelCase ='model.onnx'                   # ONNX export filename
_UpperCAmelCase ='diffusion_pytorch_model.safetensors'  # safetensors weights filename
_UpperCAmelCase ='weights.pb'                   # TF protobuf weights filename
_UpperCAmelCase ='https://huggingface.co'       # Hub base URL
_UpperCAmelCase =default_cache_path             # NOTE(review): undefined name — see header
_UpperCAmelCase ='diffusers_modules'
_UpperCAmelCase =os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
_UpperCAmelCase =['fp16', 'non-ema']            # revision suffixes treated as deprecated
_UpperCAmelCase ='.self_attn'
706
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoint -> config URL map.  (Fix: the logger and this map were
# both bound to the same obfuscated name, so the logger binding was lost.)
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class snake_case__( PretrainedConfig ):
    """Configuration class for a GPT-NeoX-Japanese model.

    Fixes restored from the mangled original: the base class was an undefined
    name instead of ``PretrainedConfig``; every ``__init__`` parameter shared
    the name ``__lowercase`` (a SyntaxError); and the body assigned to a
    throwaway local instead of ``self`` attributes.
    """

    # Key PretrainedConfig uses to identify the model family on (de)serialization.
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ) -> None:
        """Store model hyper-parameters; token ids are forwarded to the base class."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Intermediate (MLP) width is expressed as a multiple of hidden_size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Fraction of head dims receiving rotary embeddings, and its base frequency.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
619
0
# ESM (protein language model) configuration plus ESMFold sub-configs.
#
# NOTE(review): identifiers are machine-mangled.  Every `__init__` parameter is
# named `__lowercase` (duplicate argument names are a SyntaxError), bodies read
# names (`vocab_size`, `output`, …) never bound under those names here, the
# base class `UpperCAmelCase__` and the name `__UpperCamelCase` are undefined,
# and the dataclass fields are all named `SCREAMING_SNAKE_CASE__` so later
# fields overwrite earlier ones.  Code reproduced verbatim — recover names from
# the original source before executing.
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCAmelCase : List[Any] =logging.get_logger(__name__)

# TODO Update this
_UpperCAmelCase : Tuple ={
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class snake_case__( UpperCAmelCase__ ):
    """Configuration for an ESM model; optionally carries ESMFold settings."""

    SCREAMING_SNAKE_CASE__ : List[str] = """esm"""

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1_0_2_6 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase="absolute" , __lowercase=True , __lowercase=None , __lowercase=False , __lowercase=False , __lowercase=None , __lowercase=None , **__lowercase , ) -> Dict:
        """Store transformer hyper-parameters; folding models get an EsmFoldConfig."""
        super().__init__(pad_token_id=__UpperCamelCase , mask_token_id=__UpperCamelCase , **__UpperCamelCase )
        lowerCAmelCase_ : Tuple = vocab_size
        lowerCAmelCase_ : int = hidden_size
        lowerCAmelCase_ : Dict = num_hidden_layers
        lowerCAmelCase_ : Union[str, Any] = num_attention_heads
        lowerCAmelCase_ : int = intermediate_size
        lowerCAmelCase_ : Any = hidden_dropout_prob
        lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
        lowerCAmelCase_ : Tuple = max_position_embeddings
        lowerCAmelCase_ : Any = initializer_range
        lowerCAmelCase_ : Any = layer_norm_eps
        lowerCAmelCase_ : str = position_embedding_type
        lowerCAmelCase_ : Tuple = use_cache
        lowerCAmelCase_ : List[Any] = emb_layer_norm_before
        lowerCAmelCase_ : Any = token_dropout
        lowerCAmelCase_ : Dict = is_folding_model
        if is_folding_model:
            # Folding models need a (possibly default) EsmFoldConfig and vocab list.
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                lowerCAmelCase_ : Any = EsmFoldConfig()
            elif isinstance(__UpperCamelCase , __UpperCamelCase ):
                lowerCAmelCase_ : Union[str, Any] = EsmFoldConfig(**__UpperCamelCase )
            lowerCAmelCase_ : Union[str, Any] = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                lowerCAmelCase_ : int = get_default_vocab_list()
            else:
                lowerCAmelCase_ : List[str] = vocab_list
        else:
            lowerCAmelCase_ : str = None
            lowerCAmelCase_ : int = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , __UpperCamelCase ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )

    def lowercase_ ( self ) -> Any:
        """Serialize to a dict, expanding any nested EsmFoldConfig."""
        lowerCAmelCase_ : Optional[Any] = super().to_dict()
        if isinstance(self.esmfold_config , __UpperCamelCase ):
            lowerCAmelCase_ : int = self.esmfold_config.to_dict()
        return output


@dataclass
class snake_case__:
    """ESMFold-specific settings (trunk config, recycling, chunking, …)."""

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
    SCREAMING_SNAKE_CASE__ : List[Any] = True
    SCREAMING_SNAKE_CASE__ : str = False
    SCREAMING_SNAKE_CASE__ : List[Any] = False
    SCREAMING_SNAKE_CASE__ : Optional[int] = False
    SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
    SCREAMING_SNAKE_CASE__ : int = True
    SCREAMING_SNAKE_CASE__ : List[Any] = False
    SCREAMING_SNAKE_CASE__ : List[str] = 128
    SCREAMING_SNAKE_CASE__ : Tuple = None

    def lowercase_ ( self ) -> Any:
        """Normalize the trunk field: build a TrunkConfig from None or a dict."""
        if self.trunk is None:
            lowerCAmelCase_ : Dict = TrunkConfig()
        elif isinstance(self.trunk , __UpperCamelCase ):
            lowerCAmelCase_ : Any = TrunkConfig(**self.trunk )

    def lowercase_ ( self ) -> Union[str, Any]:
        """Serialize to a dict, expanding the nested trunk config."""
        lowerCAmelCase_ : Dict = asdict(self )
        lowerCAmelCase_ : List[Any] = self.trunk.to_dict()
        return output


@dataclass
class snake_case__:
    """Folding-trunk settings (block count, state dims, head widths, dropout)."""

    SCREAMING_SNAKE_CASE__ : Dict = 48
    SCREAMING_SNAKE_CASE__ : int = 1_024
    SCREAMING_SNAKE_CASE__ : int = 128
    SCREAMING_SNAKE_CASE__ : Tuple = 32
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = 32
    SCREAMING_SNAKE_CASE__ : Dict = 32
    SCREAMING_SNAKE_CASE__ : List[str] = 0
    SCREAMING_SNAKE_CASE__ : Any = 0
    SCREAMING_SNAKE_CASE__ : int = False
    SCREAMING_SNAKE_CASE__ : List[str] = 4
    SCREAMING_SNAKE_CASE__ : int = 128
    SCREAMING_SNAKE_CASE__ : List[Any] = None

    def lowercase_ ( self ) -> str:
        """Validate trunk hyper-parameters and normalize the structure module."""
        if self.structure_module is None:
            lowerCAmelCase_ : int = StructureModuleConfig()
        elif isinstance(self.structure_module , __UpperCamelCase ):
            lowerCAmelCase_ : Dict = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        # NOTE(review): x % x == 0 for any nonzero x, so the next two checks can
        # never fire; presumably the divisors were meant to be the head widths.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
                f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
                f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
        lowerCAmelCase_ : Dict = self.sequence_state_dim // self.sequence_head_width
        lowerCAmelCase_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )

    def lowercase_ ( self ) -> Union[str, Any]:
        """Serialize to a dict, expanding the nested structure-module config."""
        lowerCAmelCase_ : Any = asdict(self )
        lowerCAmelCase_ : Tuple = self.structure_module.to_dict()
        return output


@dataclass
class snake_case__:
    """Structure-module (IPA) hyper-parameters for ESMFold."""

    SCREAMING_SNAKE_CASE__ : int = 384
    SCREAMING_SNAKE_CASE__ : Any = 128
    SCREAMING_SNAKE_CASE__ : Any = 16
    SCREAMING_SNAKE_CASE__ : Any = 128
    SCREAMING_SNAKE_CASE__ : Optional[Any] = 12
    SCREAMING_SNAKE_CASE__ : Optional[Any] = 4
    SCREAMING_SNAKE_CASE__ : str = 8
    SCREAMING_SNAKE_CASE__ : str = 0.1
    SCREAMING_SNAKE_CASE__ : List[str] = 8
    SCREAMING_SNAKE_CASE__ : List[str] = 1
    SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
    SCREAMING_SNAKE_CASE__ : Optional[int] = 7
    SCREAMING_SNAKE_CASE__ : Tuple = 10
    SCREAMING_SNAKE_CASE__ : Optional[int] = 1e-8
    SCREAMING_SNAKE_CASE__ : int = 1e5

    def lowercase_ ( self ) -> Optional[Any]:
        """Serialize the dataclass to a plain dict."""
        return asdict(self )


def lowerCAmelCase ( )-> Dict:
    """Return the default ESM-2 vocabulary as a tuple of token strings."""
    return (
        "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E",
        "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W",
        "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>",
    )
707
"""Tests for the MaskFormer model (tester, common-test suite, slow integration tests).

NOTE(review): this module was recovered from an identifier-mangled copy in which all
classes/methods shared one obfuscated name and several references were left undefined
(a SyntaxError-level breakage). Names below are reconstructed from the references that
survived in the code (e.g. `MaskFormerModelTester(self)`, `self.model_tester`,
`self.all_model_classes`, `prepare_img()`); names not visible in the source are marked
with a TODO and should be confirmed against the upstream test file.
"""
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

    from PIL import Image


class MaskFormerModelTester:
    """Builds small MaskFormer configs/inputs and runs shape checks for the common tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        # `parent` is the unittest.TestCase driving the assertions.
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Return a config plus random pixel values, pixel mask and segmentation labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        # Binary masks / labels drawn at random; > 0.5 gives roughly half ones.
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """Tiny Swin backbone + tiny DETR decoder so the tests stay fast."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        # NOTE(review): assertTrue(x, msg) here mirrors the original code; it only checks
        # truthiness of the first argument, not equality of the two lengths.
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            # TODO(review): second forward originally requested hidden states explicitly.
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for MaskFormerModel / MaskFormerForInstanceSegmentation."""

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    # TODO(review): these four flag names were lost in the mangled copy; the values
    # (all False) are preserved — confirm the attribute names against the mixin.
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        # TODO(review): the two True flags below were unnamed in the mangled copy;
        # they must be the config switches that make hidden states / attentions
        # available for the retain_grad calls that follow.
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


# Absolute tolerance used by the integration checks below.
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    """Numerical regression tests against the released MaskFormer checkpoints."""

    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        # NOTE(review): the mangled copy read `np.floataa`; float32 is assumed here — confirm.
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
619
0
"""Lazy `transformers.onnx` package init.

Fixes recovered defects: the import-structure dict was bound to an obfuscated name
while `_LazyModule` referenced the undefined `_import_structure` (NameError); the
`TYPE_CHECKING` import used the mangled name `OnnxSeqaSeqConfigWithPast`; and the
`import sys` was left unused because the `sys.modules` replacement that makes the
lazy proxy take effect had been stripped.
"""
from typing import TYPE_CHECKING

from ..utils import _LazyModule


# Submodule name -> public names it exports. _LazyModule uses this mapping to defer
# importing each submodule until one of the listed names is first accessed.
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy proxy below serves them.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module object with a lazy proxy so that attribute access triggers
    # the deferred submodule imports declared in _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
"""Automatic-speech-recognition task template for `datasets`.

Fixes recovered defects: `@dataclass(frozen=UpperCamelCase-style undefined name)`,
an undefined base class, `isinstance(..., undefined)`, and `align_with_features`
assigning every intermediate to one throwaway local while returning the never-bound
names `task_template` / `input_schema` (NameError). Field and method names are
reconstructed from the surviving `self.*` references; confirm against `TaskTemplate`.
"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    # `task` participates in asdict() even at its default so serialized templates
    # always carry the task name.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    # Class-level schemas shared by all instances (ClassVar: not dataclass fields).
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's own Audio feature.

        Raises ValueError if `self.audio_column` is missing from `features` or is not
        an `Audio` feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so bypass __setattr__ via the instance __dict__.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the template's canonical column roles."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
0
"""Tests for AlignProcessor (BERT tokenizer + EfficientNet image processor).

NOTE(review): recovered from an identifier-mangled copy in which every method was
named identically (so later defs shadowed earlier ones) and method bodies referenced
the undefined `UpperCamelCase__` where their own parameter — or an imported constant
such as `IMAGE_PROCESSOR_NAME` — was meant, and `self.vocab_file` /
`self.image_processor_file` were read but never assigned. Method names follow the
standard processor-test layout; confirm against the upstream test file.
"""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        # Write a minimal vocab and image-processor config into a temp dir so that
        # the tokenizer/image-processor can be loaded with from_pretrained.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """One random CHW uint8 image, converted to a PIL image (HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
709
# Frozensets of call-argument names shared by diffusers pipeline tests: each set lists
# the parameters a family of pipelines accepts (text-to-image, image variation,
# inpainting, class-conditioned / unconditional generation, audio generation, ...),
# with the smaller companion sets listing the arguments that may be batched.
#
# NOTE(review): every constant below rebinds the SAME module-level name
# `_UpperCAmelCase`, so only the final frozenset survives at import time — this looks
# like an artifact of automated renaming (each set presumably had a distinct
# descriptive name such as TEXT_TO_IMAGE_PARAMS); verify against the original module.
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
619
0
from manim import *


# NOTE(review): an identifier mangler has renamed the assignment *targets* in
# this scene to placeholders (`lowerCAmelCase_`), while later *uses* still
# reference the original names (`cpu`, `gpu`, `model`, `checkpoint`, `key`,
# `key_text`, `blue_text`, `step_a`, `cpu_targs`, `first_animations`,
# `second_animations`, `fill`, `target`, `mem`, `cpu_left_col_base`,
# `cpu_right_col_base`). Call arguments were also rewritten to
# `lowerCamelCase_`, which is never defined, and the base class `a__` and the
# `Union` annotation are likewise undefined here. As written this module
# raises NameError at runtime. Code kept byte-identical; comments only.
class snake_case__( a__ ):
    '''simple docstring'''

    # Builds the "load a sharded checkpoint onto CPU" animation: draws CPU /
    # GPU / Model memory-block groups, a "Loaded Checkpoint" group and a key
    # legend, then animates checkpoint blocks moving into the CPU columns.
    def lowercase_ ( self ) -> Union[str, Any]:
        # One memory cell (outer stroke rectangle + inner fill rectangle).
        lowerCAmelCase_ : Tuple = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # Two columns of six cells each, arranged side by side, labelled CPU.
        lowerCAmelCase_ : List[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
        lowerCAmelCase_ : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
        lowerCAmelCase_ : int = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
        lowerCAmelCase_ : str = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowerCamelCase_ )
        # Four-cell GPU group.
        lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Dict = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowerCamelCase_ )
        # Six-cell Model group.
        lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
        lowerCAmelCase_ : Optional[Any] = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
        model.move_to([3, -1.0, 0] )
        self.add(lowerCamelCase_ )
        # Place one small filled target rectangle per model cell along the CPU
        # left column; `cpu_targs` collects them for the later animation.
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(lowerCamelCase_ ):
            rect.set_stroke(lowerCamelCase_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            lowerCAmelCase_ : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase_ , buff=0.0 )
            self.add(lowerCamelCase_ )
            cpu_targs.append(lowerCamelCase_ )
        # "Loaded Checkpoint" six-cell group to the right.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , aligned_edge=lowerCamelCase_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend square plus markup captions.
        lowerCAmelCase_ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : Tuple = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,
            font_size=1_8 ,
        )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowerCamelCase_ , lowerCamelCase_ )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,
            font_size=1_8 ,
        )
        blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        lowerCAmelCase_ : List[str] = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" ,
            font_size=2_4 ,
        )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowerCamelCase_ ) , Write(lowerCamelCase_ ) )
        self.play(Write(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) )
        # Grow a filled copy over each checkpoint cell, then move the copies
        # into the CPU columns (left column first, then right).
        lowerCAmelCase_ : Union[str, Any] = []
        lowerCAmelCase_ : Any = []
        for i, rect in enumerate(lowerCamelCase_ ):
            lowerCAmelCase_ : Optional[int] = fill.copy().set_fill(lowerCamelCase_ , opacity=0.7 )
            target.move_to(lowerCamelCase_ )
            first_animations.append(GrowFromCenter(lowerCamelCase_ , run_time=1 ) )
            lowerCAmelCase_ : str = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowerCamelCase_ , run_time=1.5 ) )
        self.play(*lowerCamelCase_ )
        self.play(*lowerCamelCase_ )
        self.wait()
710
def lowerCAmelCase(lowerCAmelCase_: int = 1_000_000) -> int:
    """Project Euler 14: return the starting number below ``lowerCAmelCase_``
    whose Collatz sequence (n -> n/2 if even, n -> 3n+1 if odd) is longest.

    Chain lengths are memoized in ``counters`` so each number's tail is only
    walked once.

    :param lowerCAmelCase_: exclusive upper bound for starting numbers.
    :return: the starting number (< bound) with the longest chain; 1 if the
        range 2..bound-1 is empty.
    """
    # Fix: the original assigned every local to the mangled name
    # `lowerCAmelCase_`, so `counters`, `counter`, `number`, `largest_number`
    # and `pre_counter` were all undefined at their use sites (NameError).
    largest_number = 1
    pre_counter = 1  # longest chain length seen so far
    counters = {1: 1}  # memo: start value -> chain length (1 terminates itself)
    for inputa in range(2, lowerCAmelCase_):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                # Reuse the memoized tail length instead of walking to 1.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number


# Backward-compatible alias: the __main__ guard below calls `solution`,
# which was previously undefined (NameError).
solution = lowerCAmelCase

if __name__ == "__main__":
    print(solution(int(input().strip())))
619
0
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

# NOTE(review): `TaTokenizer` does not exist in transformers — this looks like
# a mangled `T5Tokenizer` import; confirm against the original file.
from transformers import BartTokenizer, RagTokenizer, TaTokenizer


# NOTE(review): throughout this module an identifier mangler has (a) renamed
# assignment targets to `lowerCAmelCase_` while uses still reference the
# original names, (b) given several functions IDENTICAL parameter names
# (`lowerCAmelCase_ , lowerCAmelCase_ , ...` / `__lowercase , __lowercase`),
# which is a SyntaxError in Python, (c) renamed every module-level function to
# `lowerCAmelCase` so each definition overwrites the previous one, and
# (d) left references to undefined names (`UpperCamelCase__`, `lowercase_`,
# `Optional`, `Union`, `Any`, `Tuple` annotations). The module as written does
# not compile. Code kept byte-identical; comments only.


# Tokenize a single text line, padding/truncating to max_length.
# (Original name presumably `encode_line`; the dataset class below calls it.)
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_="pt" )-> Union[str, Any]:
    # BART-style tokenizers get `add_prefix_space` unless the line already
    # starts with a space.
    lowerCAmelCase_ : Union[str, Any] = {'''add_prefix_space''': True} if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not line.startswith(''' ''' ) else {}
    lowerCAmelCase_ : Optional[Any] = padding_side
    return tokenizer(
        [line] ,
        max_length=lowerCAmelCase_ ,
        padding='''max_length''' if pad_to_max_length else None ,
        truncation=lowerCAmelCase_ ,
        return_tensors=lowerCAmelCase_ ,
        add_special_tokens=lowerCAmelCase_ ,
        **lowerCAmelCase_ ,
    )


# Drop all-pad columns from a batch of input ids (and the matching attention
# mask, when given). Presumably the original `trim_batch`.
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , )-> Optional[Any]:
    lowerCAmelCase_ : Optional[Any] = input_ids.ne(lowerCAmelCase_ ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


# Line-per-example seq2seq dataset reading `<type_path>.source` /
# `<type_path>.target` files lazily via linecache.
# NOTE(review): the base class `lowercase_` is undefined here — presumably
# `Dataset` (imported above) originally.
class snake_case__( lowercase_ ):
    '''simple docstring'''

    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase="train" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="" , ) -> List[Any]:
        super().__init__()
        lowerCAmelCase_ : Optional[Any] = Path(UpperCamelCase__ ).joinpath(type_path + '''.source''' )
        lowerCAmelCase_ : str = Path(UpperCamelCase__ ).joinpath(type_path + '''.target''' )
        lowerCAmelCase_ : Optional[int] = self.get_char_lens(self.src_file )
        lowerCAmelCase_ : Tuple = max_source_length
        lowerCAmelCase_ : str = max_target_length
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        lowerCAmelCase_ : Any = tokenizer
        lowerCAmelCase_ : List[str] = prefix
        if n_obs is not None:
            # Restrict the dataset to the first n_obs examples.
            lowerCAmelCase_ : Union[str, Any] = self.src_lens[:n_obs]
        lowerCAmelCase_ : List[str] = src_lang
        lowerCAmelCase_ : List[Any] = tgt_lang

    def __len__( self ) -> Optional[Any]:
        return len(self.src_lens )

    def __getitem__( self , __lowercase ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = index + 1  # linecache starts at 1
        lowerCAmelCase_ : str = self.prefix + linecache.getline(str(self.src_file ) , UpperCamelCase__ ).rstrip('''\n''' )
        lowerCAmelCase_ : Optional[Any] = linecache.getline(str(self.tgt_file ) , UpperCamelCase__ ).rstrip('''\n''' )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , UpperCamelCase__ ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers split into a question encoder (source side) and a
        # generator (target side); plain tokenizers are used for both.
        lowerCAmelCase_ : Tuple = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer
        )
        lowerCAmelCase_ : Dict = self.tokenizer.generator if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer
        lowerCAmelCase_ : List[str] = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_source_length , '''right''' )
        lowerCAmelCase_ : Dict = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_target_length , '''right''' )
        lowerCAmelCase_ : Any = source_inputs['''input_ids'''].squeeze()
        lowerCAmelCase_ : List[Any] = target_inputs['''input_ids'''].squeeze()
        lowerCAmelCase_ : int = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    # Character length of each line in the file (used as src_lens).
    @staticmethod
    def lowercase_ ( __lowercase ) -> str:
        return [len(UpperCamelCase__ ) for x in Path(UpperCamelCase__ ).open().readlines()]

    # Collate function: stack examples, then trim all-pad columns.
    def lowercase_ ( self , __lowercase ) -> Union[str, Any]:
        lowerCAmelCase_ : Any = torch.stack([x['''input_ids'''] for x in batch] )
        lowerCAmelCase_ : Optional[Any] = torch.stack([x['''attention_mask'''] for x in batch] )
        lowerCAmelCase_ : Dict = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        lowerCAmelCase_ : int = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , UpperCamelCase__ )
            else self.tokenizer.pad_token_id
        )
        lowerCAmelCase_ : List[str] = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , UpperCamelCase__ )
            else self.tokenizer.pad_token_id
        )
        lowerCAmelCase_ : Tuple = trim_batch(UpperCamelCase__ , UpperCamelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = trim_batch(UpperCamelCase__ , UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        lowerCAmelCase_ : int = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch


# NOTE(review): the module logger is bound to `_UpperCAmelCase`, yet the last
# function below calls `logger.info` — `logger` is undefined as written.
_UpperCAmelCase : List[Any] = getLogger(__name__)


# Flatten one level of nesting. Presumably the original `flatten_list`.
def lowerCAmelCase ( lowerCAmelCase_ )-> Any:
    return list(itertools.chain.from_iterable(lowerCAmelCase_ ) )


# Save git metadata for reproducibility (presumably `save_git_info`).
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
    lowerCAmelCase_ : Optional[Any] = get_git_info()
    save_json(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , '''git_log.json''' ) )


# Dump an object to JSON (presumably `save_json`).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=4 , **lowerCAmelCase_ )-> Tuple:
    with open(lowerCAmelCase_ , '''w''' ) as f:
        json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=lowerCAmelCase_ , **lowerCAmelCase_ )


# Load a JSON file (presumably `load_json`).
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
    with open(lowerCAmelCase_ ) as f:
        return json.load(lowerCAmelCase_ )


# Collect repo id / sha / branch / hostname (presumably `get_git_info`).
def lowerCAmelCase ( )-> List[Any]:
    lowerCAmelCase_ : Any = git.Repo(search_parent_directories=lowerCAmelCase_ )
    lowerCAmelCase_ : List[Any] = {
        '''repo_id''': str(lowerCAmelCase_ ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos


# list(map(f, x)) helper (presumably `lmap`).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
    return list(map(lowerCAmelCase_ , lowerCAmelCase_ ) )


# Pickle an object to a file (presumably `pickle_save`).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
    with open(lowerCAmelCase_ , '''wb''' ) as f:
        return pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )


# SQuAD-style answer normalization: lowercase, strip punctuation and
# articles, collapse whitespace (presumably `normalize_answer`).
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
    def remove_articles(lowerCAmelCase_ ):
        return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , lowerCAmelCase_ )

    def white_space_fix(lowerCAmelCase_ ):
        return " ".join(text.split() )

    def remove_punc(lowerCAmelCase_ ):
        lowerCAmelCase_ : Optional[int] = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(lowerCAmelCase_ ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) )


# Token-level F1 between a prediction and a gold answer (presumably
# `f1_score`): harmonic mean of token precision and recall.
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
    lowerCAmelCase_ : Tuple = normalize_answer(lowerCAmelCase_ ).split()
    lowerCAmelCase_ : Dict = normalize_answer(lowerCAmelCase_ ).split()
    lowerCAmelCase_ : Dict = Counter(lowerCAmelCase_ ) & Counter(lowerCAmelCase_ )
    lowerCAmelCase_ : List[str] = sum(common.values() )
    if num_same == 0:
        return 0
    lowerCAmelCase_ : Tuple = 1.0 * num_same / len(lowerCAmelCase_ )
    lowerCAmelCase_ : int = 1.0 * num_same / len(lowerCAmelCase_ )
    lowerCAmelCase_ : List[str] = (2 * precision * recall) / (precision + recall)
    return fa


# Exact-match after normalization (presumably `exact_match_score`).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
    return normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ )


# Mean exact-match over parallel lists (presumably `calculate_exact_match`).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
    assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
    lowerCAmelCase_ : int = 0
    for hypo, pred in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
        em += exact_match_score(lowerCAmelCase_ , lowerCAmelCase_ )
    if len(lowerCAmelCase_ ) > 0:
        em /= len(lowerCAmelCase_ )
    return {"em": em}


# Whether the model prefix denotes a RAG model (presumably `is_rag_model`).
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
    return model_prefix.startswith('''rag''' )


# Copy hparams attributes onto a model config, translating names that differ
# per architecture (presumably `set_extra_model_params`).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
    lowerCAmelCase_ : List[str] = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    lowerCAmelCase_ : str = '''dropout_rate'''
    for p in extra_params:
        if getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
            if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and not hasattr(lowerCAmelCase_ , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(lowerCAmelCase_ ) )
                delattr(lowerCAmelCase_ , lowerCAmelCase_ )
                continue
            lowerCAmelCase_ : Dict = p if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) else equivalent_param[p]
            setattr(lowerCAmelCase_ , lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
            delattr(lowerCAmelCase_ , lowerCAmelCase_ )
    return hparams, config
711
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Fix: the methods below use `logger`, but the original bound the logger only
# to the mangled name `_UpperCAmelCase` (NameError). Keep the old binding as
# an alias for backward compatibility.
logger = logging.get_logger(__name__)
_UpperCAmelCase : str = logger


class snake_case__(PretrainedConfig):
    """Composite configuration wrapping an ``encoder`` and a ``decoder``
    sub-configuration (encoder-decoder model config).

    Fixes over the previous revision:
      * base class was the undefined name ``UpperCAmelCase__`` — restored to
        ``PretrainedConfig``, which is imported above and was otherwise unused;
      * ``__init__`` took ``**__lowercase`` but read ``kwargs`` (NameError);
      * both class attributes were named ``SCREAMING_SNAKE_CASE__`` and both
        methods ``lowercase_``, so the second silently overwrote the first;
      * the classmethod declared two parameters with the same mangled name,
        which is a SyntaxError.
    """

    # NOTE(review): attribute names restored per the transformers
    # PretrainedConfig convention — confirm against the original file.
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs) -> None:
        """Build from ``encoder=...`` and ``decoder=...`` config dicts; any
        remaining kwargs go to ``PretrainedConfig``."""
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate from two existing configs, marking the decoder as a
        cross-attending decoder."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
619
0
'''simple docstring'''

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
_UpperCAmelCase : List[str] = logging.get_logger(__name__)


# NOTE(review): an identifier mangler renamed EVERY function in this script to
# `lowerCAmelCase` (each def overwrites the previous), rewrote assignment
# targets to `lowerCAmelCase_`, and rewrote many references to the undefined
# name `__lowerCAmelCase`. The __main__ guard calls `convert_yolos_checkpoint`,
# which no longer exists. As written the script raises NameError at runtime.
# Code kept byte-identical; comments only.


# Build a YolosConfig for the given checkpoint name (presumably
# `get_yolos_config`). NOTE(review): the config-field assignment targets were
# mangled away, so the size values below are no longer stored anywhere.
def lowerCAmelCase ( lowerCAmelCase_ )-> YolosConfig:
    lowerCAmelCase_ : int = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        lowerCAmelCase_ : int = 192
        lowerCAmelCase_ : str = 768
        lowerCAmelCase_ : Union[str, Any] = 12
        lowerCAmelCase_ : Optional[int] = 3
        lowerCAmelCase_ : Union[str, Any] = [800, 1_333]
        lowerCAmelCase_ : List[Any] = False
    elif yolos_name == "yolos_s_dWr":
        lowerCAmelCase_ : str = 330
        lowerCAmelCase_ : List[str] = 14
        lowerCAmelCase_ : List[str] = 6
        lowerCAmelCase_ : Optional[Any] = 1_320
    elif "yolos_s" in yolos_name:
        lowerCAmelCase_ : Any = 384
        lowerCAmelCase_ : Optional[Any] = 1_536
        lowerCAmelCase_ : List[Any] = 12
        lowerCAmelCase_ : List[str] = 6
    elif "yolos_b" in yolos_name:
        lowerCAmelCase_ : List[Any] = [800, 1_344]
    # COCO detection label mapping from the hub.
    lowerCAmelCase_ : List[Any] = 91
    lowerCAmelCase_ : str = '''huggingface/label-files'''
    lowerCAmelCase_ : Union[str, Any] = '''coco-detection-id2label.json'''
    lowerCAmelCase_ : Tuple = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
    lowerCAmelCase_ : Dict = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
    lowerCAmelCase_ : str = idalabel
    lowerCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
    return config


# Split timm's fused qkv projection into separate q/k/v weights per layer
# (presumably `read_in_q_k_v`).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False )-> int:
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        lowerCAmelCase_ : List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        lowerCAmelCase_ : Dict = in_proj_weight[: config.hidden_size, :]
        lowerCAmelCase_ : Dict = in_proj_bias[: config.hidden_size]
        lowerCAmelCase_ : List[Any] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowerCAmelCase_ : Tuple = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowerCAmelCase_ : List[str] = in_proj_weight[-config.hidden_size :, :]
        lowerCAmelCase_ : Any = in_proj_bias[-config.hidden_size :]


# Translate one original checkpoint key to the HF naming scheme (presumably
# `rename_key`).
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
    if "backbone" in name:
        lowerCAmelCase_ : Any = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        lowerCAmelCase_ : Optional[Any] = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        lowerCAmelCase_ : Optional[Any] = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        lowerCAmelCase_ : Optional[Any] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        lowerCAmelCase_ : Optional[int] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        lowerCAmelCase_ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        lowerCAmelCase_ : Any = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        lowerCAmelCase_ : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        lowerCAmelCase_ : List[str] = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        lowerCAmelCase_ : Union[str, Any] = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        lowerCAmelCase_ : List[str] = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        lowerCAmelCase_ : List[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        lowerCAmelCase_ : Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        lowerCAmelCase_ : Union[str, Any] = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        lowerCAmelCase_ : Optional[int] = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        lowerCAmelCase_ : List[str] = name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name


# Rewrite the whole state dict, splitting fused qkv tensors (presumably
# `convert_state_dict`).
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> dict:
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase_ : str = orig_state_dict.pop(__lowerCAmelCase )
        if "qkv" in key:
            lowerCAmelCase_ : Dict = key.split('''.''' )
            lowerCAmelCase_ : Optional[int] = int(key_split[2] )
            lowerCAmelCase_ : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                lowerCAmelCase_ : int = val[:dim, :]
                lowerCAmelCase_ : List[Any] = val[
                    dim : dim * 2, :
                ]
                lowerCAmelCase_ : List[Any] = val[-dim:, :]
            else:
                lowerCAmelCase_ : Any = val[:dim]
                lowerCAmelCase_ : Union[str, Any] = val[dim : dim * 2]
                lowerCAmelCase_ : int = val[-dim:]
        else:
            lowerCAmelCase_ : Tuple = val
    return orig_state_dict


# Fetch the standard COCO cats test image (presumably `prepare_img`).
def lowerCAmelCase ( )-> torch.Tensor:
    lowerCAmelCase_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ : Tuple = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
    return im


# End-to-end conversion: load the original weights, rewrite them, verify the
# outputs against hard-coded expected logits/boxes, then save and optionally
# push to the hub (presumably `convert_yolos_checkpoint`).
@torch.no_grad()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False )-> Optional[Any]:
    lowerCAmelCase_ : Union[str, Any] = get_yolos_config(__lowerCAmelCase )
    # load original state_dict
    lowerCAmelCase_ : Union[str, Any] = torch.load(__lowerCAmelCase , map_location='''cpu''' )['''model''']
    # load 🤗 model
    lowerCAmelCase_ : Optional[int] = YolosForObjectDetection(__lowerCAmelCase )
    model.eval()
    lowerCAmelCase_ : List[Any] = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
    model.load_state_dict(__lowerCAmelCase )
    # Check outputs on an image, prepared by YolosImageProcessor
    lowerCAmelCase_ : List[Any] = 800 if yolos_name != '''yolos_ti''' else 512
    lowerCAmelCase_ : str = YolosImageProcessor(format='''coco_detection''' , size=__lowerCAmelCase )
    lowerCAmelCase_ : str = image_processor(images=prepare_img() , return_tensors='''pt''' )
    lowerCAmelCase_ : Dict = model(**__lowerCAmelCase )
    lowerCAmelCase_ , lowerCAmelCase_ : Tuple = outputs.logits, outputs.pred_boxes
    # Per-checkpoint expected top-left 3x3 slices of logits and boxes.
    lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = None, None
    if yolos_name == "yolos_ti":
        lowerCAmelCase_ : Optional[Any] = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        lowerCAmelCase_ : Optional[Any] = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        lowerCAmelCase_ : Dict = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        lowerCAmelCase_ : Optional[int] = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        lowerCAmelCase_ : Any = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        lowerCAmelCase_ : Any = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        lowerCAmelCase_ : Tuple = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        lowerCAmelCase_ : Optional[Any] = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"""Unknown yolos_name: {yolos_name}""" )
    assert torch.allclose(logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
    Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
    print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__lowerCAmelCase )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(__lowerCAmelCase )
    if push_to_hub:
        lowerCAmelCase_ : str = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        lowerCAmelCase_ : Union[str, Any] = model_mapping[yolos_name]
        image_processor.push_to_hub(__lowerCAmelCase , organization='''hustvl''' )
        model.push_to_hub(__lowerCAmelCase , organization='''hustvl''' )


if __name__ == "__main__":
    _UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--yolos_name""",
        default="""yolos_s_200_pre""",
        type=str,
        help=(
            """Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
            """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
        ),
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    _UpperCAmelCase : Optional[int] = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
712
from __future__ import annotations

from math import pi


def lowerCAmelCase(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve X_L = 2*pi*f*L for whichever quantity is given as 0.

    Exactly one of the three arguments must be 0; that is the unknown to
    solve for. Returns a one-entry dict mapping the solved quantity's name
    to its value.

    Fix: the original def line declared all three parameters with the same
    mangled name ``lowerCAmelCase_`` (a duplicate-argument SyntaxError); the
    body already used ``inductance``/``frequency``/``reactance``, so those
    names are restored.

    :param inductance: inductance L in henries (0 if unknown).
    :param frequency: frequency f in hertz (0 if unknown).
    :param reactance: inductive reactance X_L in ohms (0 if unknown).
    :raises ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if inductance < 0:
        raise ValueError('''Inductance cannot be negative''')
    if frequency < 0:
        raise ValueError('''Frequency cannot be negative''')
    if reactance < 0:
        raise ValueError('''Inductive reactance cannot be negative''')
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError('''Exactly one argument must be 0''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way ``target`` can be built by concatenating words from
    ``word_bank`` (words may be reused).

    Bottom-up dynamic programming: ``table[i]`` holds all constructions of
    ``target[:i]``. Combinations are built word-first and reversed at the end
    so each result lists words in left-to-right order.

    Fixes over the previous revision: the def line declared both parameters
    with the same mangled name (SyntaxError), the body referenced the
    undefined names ``a_``/``table_size``, the ``[[]]`` seed was bound to a
    throwaway local instead of ``table[0]`` (so the function always returned
    []), and the __main__ guard called the undefined name ``all_construct``.

    :param target: string to construct.
    :param word_bank: candidate words; ``None`` means an empty bank.
    :return: list of word lists; ``[[]]`` for the empty target.
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value: the empty string has exactly one (empty) construction
    table[0] = [[]]
    # iterate through the indices
    for i in range(table_size):
        # only positions that are themselves reachable can extend
        if table[i] != []:
            for word in word_bank:
                # slice condition: does `word` start at position i?
                if target[i : i + len(word)] == word:
                    # prepend the word to every combination reaching i ...
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # ... and push them to the position after the word
                    table[i + len(word)] += new_combinations
    # combinations are stored in reverse order, so reverse for proper output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]


# Backward-compatible alias for the mangled name the def previously carried.
lowerCAmelCase = all_construct

if __name__ == "__main__":
    print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
    print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
    print(
        all_construct(
            """hexagonosaurus""",
            ["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
        )
    )
713
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Identifiers for the learning-rate schedules provided by this module."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Schedule with a constant learning rate (multiplier 1 at every step)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1
) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(
    optimizer: Optimizer, step_rules: str, last_epoch: int = -1
) -> LambdaLR:
    """Piecewise-constant multiplier parsed from a rule string.

    Example: "1:10,0.1:20,0.01,0.005" means multiplier 1 for steps 0-10,
    0.1 for 10-20, 0.01 for 20-30, then 0.005 for any later step.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    # The trailing (colon-free) entry applies beyond the last boundary.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the initial lr, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0,
            float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)),
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
):
    """Linear warmup, then cosine decay to 0 following `num_cycles` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: int = 1,
    last_epoch: int = -1,
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer's initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build the schedule named by `name` from its parameters.

    Raises:
        ValueError: when a schedule needs `num_warmup_steps` or
            `num_training_steps` and they were not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        last_epoch=last_epoch,
    )
619
0
def lowerCAmelCase(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two n-dimensional points.

    Args:
        point_a: first point as a list of numbers.
        point_b: second point as a list of numbers.

    Raises:
        ValueError: if either point is empty/missing or dimensions differ.
        TypeError: if a point is not a list of numbers.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise unless `point` is a non-empty list of ints/floats."""
    if not point:
        raise ValueError("Missing an input")
    if not isinstance(point, list):
        msg = f"Expected a list of numbers as input, found {type(point).__name__}"
        raise TypeError(msg)
    for item in point:
        if not isinstance(item, (int, float)):
            msg = (
                "Expected a list of numbers as input, found "
                f"{type(item).__name__}"
            )
            raise TypeError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
714
from __future__ import annotations


def lowerCAmelCase(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve the semiconductor mass-action law n * p = n_i^2 for the zero argument.

    Exactly one of the three concentrations must be 0; that quantity is
    computed from the other two and returned as (name, value).

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        # n = n_i^2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i^2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable given the count(0) check; kept as a defensive sentinel.
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
619
0
from __future__ import annotations import math import random from typing import Any class snake_case__: '''simple docstring''' def __init__( self ) -> List[Any]: lowerCAmelCase_ : Optional[Any] = [] lowerCAmelCase_ : int = 0 lowerCAmelCase_ : Any = 0 def lowercase_ ( self ) -> Union[str, Any]: return self.head == self.tail def lowercase_ ( self , __lowercase ) -> int: self.data.append(__UpperCamelCase ) lowerCAmelCase_ : Optional[Any] = self.tail + 1 def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ : Optional[Any] = self.data[self.head] lowerCAmelCase_ : int = self.head + 1 return ret def lowercase_ ( self ) -> Union[str, Any]: return self.tail - self.head def lowercase_ ( self ) -> Dict: print(self.data ) print('''**************''' ) print(self.data[self.head : self.tail] ) class snake_case__: '''simple docstring''' def __init__( self , __lowercase ) -> int: lowerCAmelCase_ : Dict = data lowerCAmelCase_ : str = None lowerCAmelCase_ : Dict = None lowerCAmelCase_ : Any = 1 def lowercase_ ( self ) -> List[Any]: return self.data def lowercase_ ( self ) -> Dict: return self.left def lowercase_ ( self ) -> str: return self.right def lowercase_ ( self ) -> Tuple: return self.height def lowercase_ ( self , __lowercase ) -> int: lowerCAmelCase_ : Optional[int] = data def lowercase_ ( self , __lowercase ) -> Tuple: lowerCAmelCase_ : Optional[Any] = node def lowercase_ ( self , __lowercase ) -> Optional[int]: lowerCAmelCase_ : Optional[int] = node def lowercase_ ( self , __lowercase ) -> Tuple: lowerCAmelCase_ : List[str] = height def lowerCAmelCase ( lowerCAmelCase_ )-> int: if node is None: return 0 return node.get_height() def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int: if a > b: return a return b def lowerCAmelCase ( lowerCAmelCase_ )-> MyNode: print('''left rotation node:''' , node.get_data() ) lowerCAmelCase_ : List[str] = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(UpperCAmelCase__ ) lowerCAmelCase_ : 
List[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(UpperCAmelCase__ ) lowerCAmelCase_ : Tuple = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(UpperCAmelCase__ ) return ret def lowerCAmelCase ( lowerCAmelCase_ )-> MyNode: print('''right rotation node:''' , node.get_data() ) lowerCAmelCase_ : Dict = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(UpperCAmelCase__ ) lowerCAmelCase_ : Optional[int] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(UpperCAmelCase__ ) lowerCAmelCase_ : int = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(UpperCAmelCase__ ) return ret def lowerCAmelCase ( lowerCAmelCase_ )-> MyNode: lowerCAmelCase_ : Union[str, Any] = node.get_left() assert left_child is not None node.set_left(left_rotation(UpperCAmelCase__ ) ) return right_rotation(UpperCAmelCase__ ) def lowerCAmelCase ( lowerCAmelCase_ )-> MyNode: lowerCAmelCase_ : List[Any] = node.get_right() assert right_child is not None node.set_right(right_rotation(UpperCAmelCase__ ) ) return left_rotation(UpperCAmelCase__ ) def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> MyNode | None: if node is None: return MyNode(UpperCAmelCase__ ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , UpperCAmelCase__ ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected lowerCAmelCase_ : List[str] = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child lowerCAmelCase_ : Tuple = right_rotation(UpperCAmelCase__ ) else: lowerCAmelCase_ : str = lr_rotation(UpperCAmelCase__ ) else: node.set_right(insert_node(node.get_right() , UpperCAmelCase__ ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: lowerCAmelCase_ : Optional[int] = 
node.get_right() assert right_child is not None if data < right_child.get_data(): lowerCAmelCase_ : Any = rl_rotation(UpperCAmelCase__ ) else: lowerCAmelCase_ : Union[str, Any] = left_rotation(UpperCAmelCase__ ) lowerCAmelCase_ : Optional[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(UpperCAmelCase__ ) return node def lowerCAmelCase ( lowerCAmelCase_ )-> Any: while True: lowerCAmelCase_ : List[Any] = root.get_right() if right_child is None: break lowerCAmelCase_ : Optional[Any] = right_child return root.get_data() def lowerCAmelCase ( lowerCAmelCase_ )-> Any: while True: lowerCAmelCase_ : List[Any] = root.get_left() if left_child is None: break lowerCAmelCase_ : List[str] = left_child return root.get_data() def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> MyNode | None: lowerCAmelCase_ : Any = root.get_left() lowerCAmelCase_ : Dict = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: lowerCAmelCase_ : Any = get_left_most(UpperCAmelCase__ ) root.set_data(UpperCAmelCase__ ) root.set_right(del_node(UpperCAmelCase__ , UpperCAmelCase__ ) ) elif left_child is not None: lowerCAmelCase_ : Optional[Any] = left_child elif right_child is not None: lowerCAmelCase_ : Any = right_child else: return None elif root.get_data() > data: if left_child is None: print('''No such data''' ) return root else: root.set_left(del_node(UpperCAmelCase__ , UpperCAmelCase__ ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(UpperCAmelCase__ , UpperCAmelCase__ ) ) if get_height(UpperCAmelCase__ ) - get_height(UpperCAmelCase__ ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): lowerCAmelCase_ : List[Any] = left_rotation(UpperCAmelCase__ ) else: lowerCAmelCase_ : str = rl_rotation(UpperCAmelCase__ ) elif get_height(UpperCAmelCase__ ) - get_height(UpperCAmelCase__ ) == -2: 
assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): lowerCAmelCase_ : Optional[Any] = right_rotation(UpperCAmelCase__ ) else: lowerCAmelCase_ : Dict = lr_rotation(UpperCAmelCase__ ) lowerCAmelCase_ : Any = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(UpperCAmelCase__ ) return root class snake_case__: '''simple docstring''' def __init__( self ) -> Optional[int]: lowerCAmelCase_ : List[str] = None def lowercase_ ( self ) -> Optional[int]: return get_height(self.root ) def lowercase_ ( self , __lowercase ) -> str: print('''insert:''' + str(__UpperCamelCase ) ) lowerCAmelCase_ : Optional[int] = insert_node(self.root , __UpperCamelCase ) def lowercase_ ( self , __lowercase ) -> Tuple: print('''delete:''' + str(__UpperCamelCase ) ) if self.root is None: print('''Tree is empty!''' ) return lowerCAmelCase_ : Optional[Any] = del_node(self.root , __UpperCamelCase ) def __str__( self , ) -> Union[str, Any]: # a level traversale, gives a more intuitive look on the tree lowerCAmelCase_ : str = '''''' lowerCAmelCase_ : List[str] = MyQueue() q.push(self.root ) lowerCAmelCase_ : Dict = self.get_height() if layer == 0: return output lowerCAmelCase_ : Union[str, Any] = 0 while not q.is_empty(): lowerCAmelCase_ : List[Any] = q.pop() lowerCAmelCase_ : List[Any] = ''' ''' * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(__UpperCamelCase ) q.push(__UpperCamelCase ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space lowerCAmelCase_ : Tuple = cnt + 1 for i in range(1_0_0 ): if cnt == math.pow(2 , __UpperCamelCase ) - 1: lowerCAmelCase_ : Dict = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def lowerCAmelCase ( )-> None: import doctest doctest.testmod() if __name__ == 
"__main__": _test() _UpperCAmelCase : Dict =AVLtree() _UpperCAmelCase : int =list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
715
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo
# with the command: python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name in `config_class`'s source whose markdown
    link matches `https://huggingface.co/<name>`, or None if there is none."""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-deprecated, non-exempt config class
    whose docstring lacks a valid checkpoint link."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
0
def lowerCAmelCase(n_term: str) -> list:
    """Return the first `n_term` terms of the harmonic series as strings.

    The first term is "1", subsequent terms are "1/2", "1/3", ...
    An empty string yields an empty list.

    Args:
        n_term: number of terms, given as a (decimal) string.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # The very first term is written "1" rather than "1/1".
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(lowerCAmelCase(nth_term))
716
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class snake_case__:
    """Wrapper around a torch LR scheduler that only advances it when the wrapped
    optimizers actually performed a step (e.g. under gradient accumulation or
    skipped steps from a grad scaler), and that compensates for multi-process
    training by stepping once per process when batches are not split.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        # scheduler: the torch.optim.lr_scheduler instance being wrapped.
        # optimizers: one optimizer or a list/tuple of them to watch for skipped steps.
        # step_with_optimizer: when False, step() forwards unconditionally.
        # split_batches: when True, one scheduler step per training step.
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough accessors mirroring the torch scheduler API.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
619
0
from __future__ import annotations import math def lowerCAmelCase ( lowerCAmelCase_ ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True _UpperCAmelCase : Tuple =[num for num in range(3, 10_0001, 2) if not is_prime(num)] def lowerCAmelCase ( lowerCAmelCase_ ): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError('''n must be an integer''' ) if n <= 0: raise ValueError('''n must be >= 0''' ) lowerCAmelCase_ : List[str] = [] for num in range(len(lowerCAmelCase_ ) ): lowerCAmelCase_ : Dict = 0 while 2 * i * i <= odd_composites[num]: lowerCAmelCase_ : Any = odd_composites[num] - 2 * i * i if is_prime(lowerCAmelCase_ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(lowerCAmelCase_ ) == n: return list_nums return [] def lowerCAmelCase ( ): return compute_nums(1 )[0] if __name__ == "__main__": print(f"""{solution() = }""")
717
from manim import *


class snake_case__(Scene):
    """Manim scene: animates checkpoint weights being offloaded from CPU memory
    to disk and then garbage-collected.

    NOTE(review): variable names and the direction constants (UP/RIGHT/DOWN)
    were reconstructed from the garbled source and the on-screen text —
    confirm against a rendered frame before relying on exact layout.
    """

    def construct(self):
        # Building blocks: a memory cell, a fill marker, and a stroke-less meta cell.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.25, width=0.25)
        meta_mem = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU: two columns of six cells with a label underneath.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a single column of four cells.
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # Model: a row of six cells.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        # Yellow markers: where each model shard lives on the CPU.
        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = (
                Rectangle(height=0.46 / 4, width=0.46 / 3)
                .set_stroke(width=0.0)
                .set_fill(YELLOW, opacity=0.7)
            )
            if i == 0:
                cpu_target.next_to(
                    cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP
                )
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_base, *model_cpu_arr)

        # Loaded checkpoint: a row of six cells above the model.
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(
            DOWN, buff=0.5, aligned_edge=DOWN
        )
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        # Blue markers: checkpoint shards and their CPU locations.
        ckpt_arr = []
        ckpt_cpu_arr = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)
            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        # Legend.
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key, key_text)
        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        # Step 1: weights are written to disk.
        step_a = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_a.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_a, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        # Animate CPU-resident checkpoint shards shrinking onto the disk cells.
        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)
        self.play(FadeOut(step_a))

        # Step 2: the in-memory checkpoint is garbage-collected.
        step_a = MarkupText(
            f"Then, the checkpoint is removed from memory\nthrough garbage collection.",
            font_size=24,
        )
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=3))
        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
619
0
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def main():
    """Fine-tune and/or evaluate a sequence-classification model on the XNLI dataset."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    # NOTE: the parser returns one dataclass instance per class passed above — it must be unpacked.
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
718
_UpperCAmelCase : Dict =[ (1000, """M"""), (900, """CM"""), (500, """D"""), (400, """CD"""), (100, """C"""), (90, """XC"""), (50, """L"""), (40, """XL"""), (10, """X"""), (9, """IX"""), (5, """V"""), (4, """IV"""), (1, """I"""), ] def lowerCAmelCase ( lowerCAmelCase_ )-> int: lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000} lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : List[str] = 0 while place < len(lowerCAmelCase_ ): if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowerCAmelCase ( lowerCAmelCase_ )-> str: lowerCAmelCase_ : List[Any] = [] for arabic, roman in ROMAN: ((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ ) result.append(roman * factor ) if number == 0: break return "".join(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
619
0
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    """Builds small random YOLOS configs/inputs and shape-checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for YOLOS (attention/hidden-state shapes differ from ViT)."""

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head model: object detection needs per-example label dicts
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
719
import csv import tweepy # Twitter API credentials _UpperCAmelCase : int ="""""" _UpperCAmelCase : Optional[int] ="""""" _UpperCAmelCase : Dict ="""""" _UpperCAmelCase : str ="""""" def lowerCAmelCase ( lowerCAmelCase_ )-> None: # authorize twitter, initialize tweepy lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ ) auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ ) # initialize a list to hold all the tweepy Tweets lowerCAmelCase_ : Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # save the id of the oldest tweet less one lowerCAmelCase_ : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowerCAmelCase_ ) > 0: print(f"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates lowerCAmelCase_ : Optional[Any] = api.user_timeline( screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # update the id of the oldest tweet less one lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1 print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(lowerCAmelCase_ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
619
0
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of *img* with every channel shifted by *level*.

    ``level`` must lie in [-255.0, 255.0]; PIL clips the resulting channel
    values when rebuilding the image.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # Fundamental per-pixel transform: shift the channel value by `level`.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    # Apply the transform to every pixel; the original passed an undefined
    # name here instead of the local `brightness` function.
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
720
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if *number* is prime, by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: all primes from 2 up to and including *n*."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """All primes from 2 up to *n*, found by testing each number individually."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Prime factors of *number* in ascending order; 0 and 1 map to themselves."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Largest prime factor of *number*."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Smallest prime factor of *number*."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True if *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Goldbach's assumption: return two primes whose sum is the even *number* (> 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor of two non-negative integers (Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple (German: kgV) of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime number, with get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Primes strictly between the primes *p_number_1* < *p_number_2* (both excluded)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition; guard the boundary check so an empty result (consecutive
    # primes, e.g. 3 and 5) does not IndexError.
    assert isinstance(ans, list) and (
        not ans or (ans[0] != p_number_1 and ans[-1] != p_number_2)
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """All positive divisors of *n* (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """True if *number* equals the sum of its proper divisors (e.g. 6, 28)."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce numerator/denominator by their gcd; returns the simplified pair."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Fibonacci number with fib(0) == fib(1) == 1 (shifted indexing)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
619
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class snake_case__(PretrainedConfig):
    """Configuration for Open-Llama models.

    Stores the hyper-parameters needed to instantiate an Open-Llama model and
    inherits (de)serialization behavior from ``PretrainedConfig``.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE: the kwarg key keeps its historical misspelling for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: a dict {"type": "linear"|"dynamic", "factor": float > 1}.

        Raises:
            ValueError: if the dict shape, type field, or factor field is invalid.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
721
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search for *target* in array[left:right) — right is EXCLUSIVE.

    Returns the index of the first match, or -1.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted *array*; returns an index or -1.

    Bounds `left`/`right` are INCLUSIVE; intervals shorter than `precision`
    fall back to linear search (hence the `right + 1` when delegating, since
    lin_search takes an exclusive right bound — the original mixed the two
    conventions and missed targets sitting on the right boundary).
    """
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on array[left..right] (INCLUSIVE bounds).

    Returns an index or -1; small intervals delegate to lin_search with an
    exclusive `right + 1` bound (see ite_ternary_search).
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
619
0
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run generation on this rank's shard of the dataset and dump it to rank_<r>_output.json.

    Returns (results, num_replicas); the caller on rank 0 gathers all shards.
    """
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    """CLI entry point: parse args, run distributed eval, gather + score on rank 0."""
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
        # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank result lists, sort by example id, return the preds."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Poll save_dir until every rank's JSON shard exists and parses, or time out."""
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
        # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
700
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure; optional-dependency entries are appended below.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
0
def solution(n: int = 100) -> int:
    """Project Euler problem 29: count distinct terms a**b for 2 <= a, b <= n.

    >>> solution(5)
    15
    """
    max_limit = n + 1  # maximum limit (exclusive) for both base and exponent
    # A set collapses duplicate powers (e.g. 2**4 == 4**2) automatically.
    collect_powers = {a**b for a in range(2, max_limit) for b in range(2, max_limit)}
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
701
# Tests for the VQ-Diffusion pipeline: a fast CPU smoke test built from tiny
# dummy sub-models, plus a slow GPU-only integration test against a reference
# image.
# NOTE(review): identifiers in this file appear machine-renamed — every method
# is `lowercase_`, both test classes are `snake_case__`, helpers assign to
# `lowerCAmelCase_` but return names such as `model`/`tokenizer`, and keyword
# values reference the undefined module-level name `__lowercase`.  Code is
# preserved verbatim; only comments/docstrings were added.
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

_UpperCAmelCase : Any = False


class snake_case__( unittest.TestCase ):
    """Fast CPU tests for VQDiffusionPipeline built from tiny dummy components."""

    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        # size of the discrete VQ codebook used by the dummy models
        return 1_2

    @property
    def lowercase_ ( self ) -> Any:
        # number of adaptive-norm embeddings for the dummy transformer
        return 1_2

    @property
    def lowercase_ ( self ) -> Optional[Any]:
        # hidden size of the dummy CLIP text encoder
        return 3_2

    @property
    def lowercase_ ( self ) -> int:
        """Build a tiny deterministic VQModel (seeded) for testing."""
        torch.manual_seed(0 )
        lowerCAmelCase_ : Any = VQModel(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def lowercase_ ( self ) -> Dict:
        """Load the tiny test CLIP tokenizer from the hub."""
        lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def lowercase_ ( self ) -> int:
        """Build a tiny deterministic CLIP text encoder (seeded)."""
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(__lowercase )

    @property
    def lowercase_ ( self ) -> List[str]:
        """Build a tiny deterministic discrete transformer (seeded)."""
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = 1_2
        lowerCAmelCase_ : int = 1_2
        lowerCAmelCase_ : Union[str, Any] = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 3_2,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 3_2,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
        return model

    def lowercase_ ( self ) -> str:
        """End-to-end CPU smoke test without learned CF-sampling embeddings.

        Runs the pipeline twice with identical seeds (dict and tuple output
        paths) and checks both against a hard-coded expected slice.
        """
        lowerCAmelCase_ : List[Any] = '''cpu'''
        lowerCAmelCase_ : Any = self.dummy_vqvae
        lowerCAmelCase_ : str = self.dummy_text_encoder
        lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
        lowerCAmelCase_ : int = self.dummy_transformer
        lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
        lowerCAmelCase_ : Dict = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : int = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : Union[str, Any] = output.images
        # second run with the same seed, returning a tuple instead of a dict
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : List[Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self ) -> List[str]:
        """Same smoke test but with learnable CF-sampling embeddings enabled."""
        lowerCAmelCase_ : Optional[Any] = '''cpu'''
        lowerCAmelCase_ : str = self.dummy_vqvae
        lowerCAmelCase_ : Dict = self.dummy_text_encoder
        lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
        lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
        lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : str = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
        # NOTE(review): tolerance 2.0 here (vs 1e-2 elsewhere) looks unusually
        # loose — confirm against upstream before tightening.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Slow GPU integration test against a reference output image."""

    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self ) -> int:
        """Run the pretrained ithq pipeline and compare to a stored reference."""
        lowerCAmelCase_ : Tuple = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Optional[int] = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
        lowerCAmelCase_ : Union[str, Any] = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image ).max() < 2.0
619
0
# Slow (sentencepiece-based) tokenizer for the CPM model.  Largely mirrors
# the XLNet tokenizer, with jieba pre-segmentation for Chinese text and a
# space/newline <-> \u2582/\u2583 transliteration in decoding.
# NOTE(review): identifiers appear machine-renamed — the base class
# `UpperCAmelCase__` is undefined, `__init__` repeats the parameter name
# `__lowercase`, methods are all `lowercase_`, and bodies assign to
# `lowerCAmelCase_` while reading names like `vocab`/`state`/`outputs`.
# Code is preserved verbatim; only comments/docstrings were added.
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging

_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {"""vocab_file""": """spiece.model"""}
_UpperCAmelCase : List[Any] = {
    """vocab_file""": {
        """TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
    }
}


class snake_case__( UpperCAmelCase__ ):
    """Runs pre-tokenization with jieba and tokenization with sentencepiece."""

    def __init__( self , __lowercase , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , __lowercase = None , **__lowercase , ) -> None:
        # mask token behaves like a normal word, i.e. includes the space before it
        lowerCAmelCase_ : str = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        lowerCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
        lowerCAmelCase_ : str = 3
        lowerCAmelCase_ : Tuple = do_lower_case
        lowerCAmelCase_ : Any = remove_space
        lowerCAmelCase_ : Optional[int] = keep_accents
        lowerCAmelCase_ : Any = vocab_file
        # load the sentencepiece model from the given vocab file
        lowerCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__lowercase )
        # jieba is an optional dependency needed for Chinese word segmentation
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''' )
        lowerCAmelCase_ : Optional[int] = jieba
        # space -> \u2582 and newline -> \u2583, reversed in _decode below
        lowerCAmelCase_ : Any = str.maketrans(''' \n''' , '''\u2582\u2583''' )

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def lowercase_ ( self ) -> List[Any]:
        # vocabulary size equals the sentencepiece model size
        return len(self.sp_model )

    def lowercase_ ( self ) -> Union[str, Any]:
        """Return the full token -> id vocabulary, including added tokens."""
        lowerCAmelCase_ : Tuple = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> List[str]:
        # the sentencepiece processor is not picklable; drop it before pickling
        lowerCAmelCase_ : Union[str, Any] = self.__dict__.copy()
        lowerCAmelCase_ : List[str] = None
        return state

    def __setstate__( self , __lowercase ) -> Tuple:
        lowerCAmelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            lowerCAmelCase_ : List[str] = {}
        # re-create the sentencepiece processor dropped in __getstate__
        lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase_ ( self , __lowercase ) -> Optional[int]:
        """Normalize raw text (whitespace, quotes, accents, case) before tokenizing."""
        if self.remove_space:
            lowerCAmelCase_ : str = ''' '''.join(inputs.strip().split() )
        else:
            lowerCAmelCase_ : Optional[Any] = inputs
        lowerCAmelCase_ : Dict = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            # strip combining accent marks via NFKD decomposition
            lowerCAmelCase_ : List[Any] = unicodedata.normalize('''NFKD''' , __lowercase )
            lowerCAmelCase_ : List[Any] = ''''''.join([c for c in outputs if not unicodedata.combining(__lowercase )] )
        if self.do_lower_case:
            lowerCAmelCase_ : Dict = outputs.lower()
        return outputs

    def lowercase_ ( self , __lowercase ) -> List[str]:
        """Tokenize a string into sentencepiece pieces, splitting digit+comma tails."""
        lowerCAmelCase_ : List[str] = self.preprocess_text(__lowercase )
        lowerCAmelCase_ : Optional[Any] = self.sp_model.encode(__lowercase , out_type=__lowercase )
        lowerCAmelCase_ : List[str] = []
        for piece in pieces:
            # re-split pieces that end in "<digit>," so the comma is standalone
            if len(__lowercase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                lowerCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowercase , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCAmelCase_ : Optional[int] = cur_pieces[1:]
                    else:
                        lowerCAmelCase_ : List[str] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(__lowercase )
            else:
                new_pieces.append(__lowercase )
        return new_pieces

    def lowercase_ ( self , __lowercase ) -> int:
        # token (piece) -> vocabulary id
        return self.sp_model.PieceToId(__lowercase )

    def lowercase_ ( self , __lowercase ) -> int:
        # vocabulary id -> token (piece)
        return self.sp_model.IdToPiece(__lowercase )

    def lowercase_ ( self , __lowercase ) -> Tuple:
        """Join pieces back into a string, mapping SPIECE_UNDERLINE to spaces."""
        lowerCAmelCase_ : Any = ''''''.join(__lowercase ).replace(__lowercase , ''' ''' ).strip()
        return out_string

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Build model inputs with XLNet-style special tokens: A [SEP] (B [SEP]) [CLS]."""
        lowerCAmelCase_ : List[Any] = [self.sep_token_id]
        lowerCAmelCase_ : str = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
        if token_ids_a is not None:
            return ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1, 1]
        return ([0] * len(__lowercase )) + [1, 1]

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Create XLNet-style token type ids; the trailing [CLS] gets segment id 2."""
        lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
        lowerCAmelCase_ : str = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        """Save the sentencepiece vocabulary (copy or serialized proto) to a directory."""
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : Any = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowercase )
        elif not os.path.isfile(self.vocab_file ):
            # no file on disk: write out the in-memory serialized model instead
            with open(__lowercase , '''wb''' ) as fi:
                lowerCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(__lowercase )
        return (out_vocab_file,)

    def lowercase_ ( self , *__lowercase , **__lowercase ) -> Optional[Any]:
        """Decode ids to text, reversing the space/newline transliteration."""
        lowerCAmelCase_ : Tuple = super()._decode(*__lowercase , **__lowercase )
        lowerCAmelCase_ : Any = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
        return text
702
# Fast (tokenizers-backed) tokenizer for XLNet, with a fallback to None for
# the slow class when sentencepiece is unavailable.
# NOTE(review): identifiers appear machine-renamed — the base class
# `UpperCAmelCase__` is undefined, every class attribute is named
# `SCREAMING_SNAKE_CASE__` (so later assignments shadow earlier ones), and
# `__init__` repeats the parameter name `__lowercase`.  Code is preserved
# verbatim; only comments/docstrings were added.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # slow tokenizer class is unavailable without sentencepiece
    _UpperCAmelCase : Dict = None

_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Any = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}
_UpperCAmelCase : Dict = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
_UpperCAmelCase : Tuple = """▁"""

# Segments (not really needed)
_UpperCAmelCase : str = 0
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : int = 2
_UpperCAmelCase : Any = 3
_UpperCAmelCase : List[Any] = 4


class snake_case__( UpperCAmelCase__ ):
    """Fast XLNet tokenizer; pads on the left and uses XLNet special tokens."""

    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        # can only save a slow vocabulary if the sentencepiece file is present
        lowerCAmelCase_ : str = False if not self.vocab_file else True

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Build model inputs with XLNet-style special tokens: A [SEP] (B [SEP]) [CLS]."""
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Create XLNet-style token type ids; the trailing [CLS] gets segment id 2."""
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into the save directory."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
619
0
# Fast (tokenizers-backed) tokenizer for BigBird, with a fallback to None for
# the slow class when sentencepiece is unavailable.
# NOTE(review): identifiers appear machine-renamed — the base class
# `UpperCAmelCase__` is undefined, every class attribute is named
# `SCREAMING_SNAKE_CASE__` (later assignments shadow earlier ones), and
# `__init__` repeats the parameter name `__lowercase`.  Code is preserved
# verbatim; only comments/docstrings were added.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # slow tokenizer class is unavailable without sentencepiece
    _UpperCAmelCase : Tuple = None

_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Tuple = {
    """vocab_file""": {
        """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
        """google/bigbird-roberta-large""": (
            """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
        ),
        """google/bigbird-base-trivia-itc""": (
            """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
        ),
    },
    """tokenizer_file""": {
        """google/bigbird-roberta-base""": (
            """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
        ),
        """google/bigbird-roberta-large""": (
            """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
        ),
        """google/bigbird-base-trivia-itc""": (
            """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
        ),
    },
}
_UpperCAmelCase : List[str] = {
    """google/bigbird-roberta-base""": 4096,
    """google/bigbird-roberta-large""": 4096,
    """google/bigbird-base-trivia-itc""": 4096,
}
_UpperCAmelCase : Dict = """▁"""


class snake_case__( UpperCAmelCase__ ):
    """Fast BigBird tokenizer using BERT-style [CLS]/[SEP] sentence framing."""

    SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Dict = BigBirdTokenizer
    SCREAMING_SNAKE_CASE__ : Tuple = ["""input_ids""", """attention_mask"""]
    SCREAMING_SNAKE_CASE__ : List[int] = []

    def __init__( self , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase="[SEP]" , __lowercase="[MASK]" , __lowercase="[CLS]" , **__lowercase , ) -> int:
        # wrap plain-string special tokens as AddedToken instances
        lowerCAmelCase_ : List[str] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
        lowerCAmelCase_ : Dict = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
        lowerCAmelCase_ : Tuple = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
        lowerCAmelCase_ : Optional[int] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
        lowerCAmelCase_ : str = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
        lowerCAmelCase_ : str = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Optional[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            __lowercase , tokenizer_file=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , )
        lowerCAmelCase_ : Tuple = vocab_file
        # can only save a slow vocabulary if the sentencepiece file is present
        lowerCAmelCase_ : str = False if not self.vocab_file else True

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Build model inputs with special tokens: [CLS] A [SEP] (B [SEP])."""
        lowerCAmelCase_ : Dict = [self.sep_token_id]
        lowerCAmelCase_ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is None:
            return [1] + ([0] * len(__lowercase )) + [1]
        return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1]

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Create BERT-style token type ids: 0 for sequence A, 1 for sequence B."""
        lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
        lowerCAmelCase_ : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into the save directory."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : Any = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
703
# Quantum full adder built with qiskit: validates three inputs (0, 1, or 2 =
# superposition), builds a 4-qubit circuit, and measures the sum/carry qubits.
# NOTE(review): identifiers appear machine-renamed — the signature repeats the
# parameter name `lowerCAmelCase_` three times (a SyntaxError as written), the
# body reads names never defined here (`input_a`, `carry_in`, `entry`,
# `quantum_circuit`, `job`), and `isinstance(x, x)` checks look collapsed from
# per-input type checks.  Code is preserved verbatim; only comments were added.
import math

import qiskit


def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts:
    # reject non-integer inputs
    if (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
    ):
        raise TypeError('''inputs must be integers.''' )
    # reject negative inputs
    if (input_a < 0) or (input_a < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    # reject non-integral float values
    if (
        (math.floor(lowerCAmelCase_ ) != input_a)
        or (math.floor(lowerCAmelCase_ ) != input_a)
        or (math.floor(lowerCAmelCase_ ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    # inputs are 0, 1, or 2 (2 = prepare the qubit in superposition)
    if (input_a > 2) or (input_a > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers: 4 qubits (3 inputs + 1 carry-out), 2 classical bits
    lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' )
    lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    lowerCAmelCase_ : Any = [input_a, input_a, carry_in]
    lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
    # encode each classical entry onto its input qubit
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(lowerCAmelCase_ )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(lowerCAmelCase_ )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(lowerCAmelCase_ )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , lowerCAmelCase_ )  # measure the last two qbits
    # run on the Aer simulator and return the measurement counts
    lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
    lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 )
    return job.result().get_counts(lowerCAmelCase_ )


if __name__ == "__main__":
    print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
619
0
# Conversion script: maps an original fairseq TrOCR checkpoint onto the
# Hugging Face VisionEncoderDecoderModel (ViT encoder + TrOCR causal decoder),
# verifies logits against hard-coded expected values, and saves model +
# processor.
# NOTE(review): identifiers appear machine-renamed — all five helpers share
# the name `lowerCAmelCase` (later defs shadow earlier ones) and bodies read
# names never assigned (`encoder_config`, `rename_keys`, `state_dict`, `dct`,
# `val`, `checkpoint_url`, `im`, `args`, `convert_tr_ocr_checkpoint`).  Code
# is preserved verbatim; only comments/docstrings were added.
import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging

logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)


def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
    """Build the (old key, new key) rename list for the ViT encoder weights."""
    lowerCAmelCase_ : Optional[Any] = []
    for i in range(encoder_config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append(
            (f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
            ('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
            ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
            ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
            ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
            ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
        ] )
    return rename_keys


def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
    """Split each fused qkv projection into separate query/key/value weights."""
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        lowerCAmelCase_ : List[Any] = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
        lowerCAmelCase_ : List[str] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        lowerCAmelCase_ : Tuple = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        lowerCAmelCase_ : List[Any] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
    """Move a state-dict entry from its old key to its new key."""
    lowerCAmelCase_ : str = dct.pop(lowerCAmelCase_ )
    lowerCAmelCase_ : Union[str, Any] = val


def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Download a sample image (handwritten or printed) for verification."""
    if "handwritten" in checkpoint_url:
        lowerCAmelCase_ : Optional[Any] = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'''  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        lowerCAmelCase_ : Dict = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    lowerCAmelCase_ : int = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('''RGB''' )
    return im


@torch.no_grad()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
    """Convert an original TrOCR checkpoint URL to a saved HF model + processor."""
    # encoder is a ViT; decoder config depends on the checkpoint variant
    lowerCAmelCase_ : Any = ViTConfig(image_size=384 , qkv_bias=lowerCAmelCase_ )
    lowerCAmelCase_ : Optional[Any] = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        lowerCAmelCase_ : Optional[Any] = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        lowerCAmelCase_ : Tuple = 1_024
        lowerCAmelCase_ : Tuple = 4_096
        lowerCAmelCase_ : Dict = 24
        lowerCAmelCase_ : int = 16
        lowerCAmelCase_ : List[str] = 1_024
    else:
        raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        lowerCAmelCase_ : int = False
        lowerCAmelCase_ : List[str] = '''relu'''
        lowerCAmelCase_ : Optional[int] = 1_024
        lowerCAmelCase_ : Optional[Any] = True
        lowerCAmelCase_ : int = False
        lowerCAmelCase_ : str = False
    # load HuggingFace model
    lowerCAmelCase_ : Dict = ViTModel(lowerCAmelCase_ , add_pooling_layer=lowerCAmelCase_ )
    lowerCAmelCase_ : str = TrOCRForCausalLM(lowerCAmelCase_ )
    lowerCAmelCase_ : str = VisionEncoderDecoderModel(encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
    model.eval()
    # load state_dict of original model, rename some keys
    lowerCAmelCase_ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' , check_hash=lowerCAmelCase_ )['''model''']
    lowerCAmelCase_ : str = create_rename_keys(lowerCAmelCase_ , lowerCAmelCase_ )
    for src, dest in rename_keys:
        rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        lowerCAmelCase_ : Tuple = state_dict.pop(lowerCAmelCase_ )
        if key.startswith('''decoder''' ) and "output_projection" not in key:
            lowerCAmelCase_ : Tuple = val
        else:
            lowerCAmelCase_ : List[Any] = val
    # load state dict
    model.load_state_dict(lowerCAmelCase_ )
    # Check outputs on an image
    lowerCAmelCase_ : List[str] = ViTImageProcessor(size=encoder_config.image_size )
    lowerCAmelCase_ : str = RobertaTokenizer.from_pretrained('''roberta-large''' )
    lowerCAmelCase_ : Dict = TrOCRProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
    lowerCAmelCase_ : Union[str, Any] = processor(images=prepare_img(lowerCAmelCase_ ) , return_tensors='''pt''' ).pixel_values
    # verify logits
    lowerCAmelCase_ : Any = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    lowerCAmelCase_ : Tuple = model(pixel_values=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ )
    lowerCAmelCase_ : Tuple = outputs.logits
    lowerCAmelCase_ : Optional[Any] = torch.Size([1, 1, 50_265] )
    # per-variant expected first logits, used as a conversion sanity check
    if "trocr-base-handwritten" in checkpoint_url:
        lowerCAmelCase_ : int = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        lowerCAmelCase_ : List[str] = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        lowerCAmelCase_ : Optional[int] = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        lowerCAmelCase_ : Optional[int] = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , lowerCAmelCase_ , atol=1e-3 ), "First elements of logits not as expected"
    Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowerCAmelCase_ )
    print(f"""Saving processor to {pytorch_dump_folder_path}""" )
    processor.save_pretrained(lowerCAmelCase_ )


if __name__ == "__main__":
    _UpperCAmelCase : List[Any] = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    _UpperCAmelCase : Optional[Any] = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
704
import re


def indian_phone_validator(phone: str) -> bool:
    """Return ``True`` if *phone* is a valid Indian mobile number.

    Accepted forms: an optional ``+91`` prefix (optionally followed by a
    hyphen or space), an optional leading ``0`` or ``91``, then a 10-digit
    number starting with 7, 8 or 9.

    BUG FIX: the original (obfuscated) version defined the function as
    ``lowerCAmelCase``, passed the same name as both the pattern and the
    subject of ``re.search``, and compared against an undefined ``phone``
    variable — every call raised ``NameError``. The canonical pattern/subject
    split and names are restored here.
    """
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        # The ^...$ anchors force a full-string match, so match.string (the
        # whole input) equals phone whenever a match object exists.
        return match.string == phone
    return False


# Backward-compatible alias for the obfuscated original name.
lowerCAmelCase = indian_phone_validator


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
619
0
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

# NOTE(review): the module constants below all share the name `_UpperCAmelCase`,
# so each assignment shadows the previous one, and the class body references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined here.
# This looks like mechanical renaming damage — confirm against the original file.
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : List[Any] ={"""vocab_file""": """spiece.model"""}
_UpperCAmelCase : Union[str, Any] ={
    """vocab_file""": {
        """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
        """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
    }
}
_UpperCAmelCase : Optional[int] ={
    """AI-Sweden/gpt-sw3-126m""": 2048,
    """AI-Sweden/gpt-sw3-350m""": 2048,
    """AI-Sweden/gpt-sw3-1.6b""": 2048,
    """AI-Sweden/gpt-sw3-6.7b""": 2048,
    """AI-Sweden/gpt-sw3-20b""": 2048,
}


class snake_case__( UpperCAmelCase__ ):
    """SentencePiece-based tokenizer for the GPT-SW3 model family.

    Wraps a ``sentencepiece`` processor and adds GPT-SW3-specific text
    preprocessing (non-printing-character removal, whitespace and NFC
    normalization) plus fast encode/decode helpers and a chat-prompt builder.

    NOTE(review): every method below is named ``lowercase_`` and most
    parameters are named ``__lowercase`` (including duplicates, which is a
    SyntaxError), and method bodies reference names (``sp_model_kwargs``,
    ``text``, ``tokens``, ...) that no longer exist — clearly renaming damage
    from an obfuscation pass; the code is kept verbatim and only documented.
    """

    SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : str = ["""input_ids""", """attention_mask"""]

    def __init__( self , __lowercase , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase = None , **__lowercase , ) -> None:
        # Load the SentencePiece model and pick per-checkpoint default special
        # tokens (the 7b checkpoint uses different pad/bos defaults).
        lowerCAmelCase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        lowerCAmelCase_ : Union[str, Any] = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored'''
            )
            lowerCAmelCase_ : Optional[int] = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        lowerCAmelCase_ : Union[str, Any] = '''<|endoftext|>''' if eos_token is None else eos_token
        lowerCAmelCase_ : str = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            lowerCAmelCase_ : Tuple = unk_token if pad_token is None else pad_token
            lowerCAmelCase_ : str = eos_token if bos_token is None else bos_token
        else:
            lowerCAmelCase_ : List[str] = '''<pad>''' if pad_token is None else pad_token
            lowerCAmelCase_ : List[str] = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
        lowerCAmelCase_ : Tuple = do_lower_case
        lowerCAmelCase_ : Optional[Any] = remove_space
        lowerCAmelCase_ : Tuple = keep_accents
        lowerCAmelCase_ : Optional[int] = vocab_file
        lowerCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__lowercase )
        # Used for whitespace normalization in input texts
        # fmt : off
        lowerCAmelCase_ : Dict = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        lowerCAmelCase_ : Any = re.compile(
            f"""[{"".join(map(__lowercase , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )

    def __getstate__( self ) -> List[Any]:
        # Drop the (unpicklable) SentencePiece processor before pickling;
        # __setstate__ re-loads it from self.vocab_file.
        lowerCAmelCase_ : Optional[int] = self.__dict__.copy()
        lowerCAmelCase_ : Union[str, Any] = None
        return state

    def __setstate__( self , __lowercase ) -> List[Any]:
        lowerCAmelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            lowerCAmelCase_ : List[str] = {}
        lowerCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def lowercase_ ( self ) -> int:
        # Vocabulary size is the SentencePiece model's piece count.
        return len(self.sp_model )

    def lowercase_ ( self , __lowercase ) -> str:
        # Preprocess: strip non-printing characters, collapse exotic
        # whitespace to a plain space, then NFC-normalize.
        lowerCAmelCase_ : List[Any] = self.non_printing_characters_re.sub('''''' , __lowercase )
        # Normalize whitespaces
        lowerCAmelCase_ : str = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        lowerCAmelCase_ : Any = unicodedata.normalize('''NFC''' , __lowercase )
        return text

    def lowercase_ ( self , __lowercase , **__lowercase ) -> List[str]:
        # Tokenize: preprocess then delegate to SentencePiece.
        lowerCAmelCase_ : Optional[int] = self.preprocess_text(__lowercase )
        return self.sp_model.encode(__lowercase , out_type=__lowercase )

    def lowercase_ ( self , __lowercase ) -> int:
        # Token string -> vocabulary id.
        return self.sp_model.PieceToId(__lowercase )

    def lowercase_ ( self , __lowercase ) -> str:
        # Vocabulary id -> token string.
        return self.sp_model.IdToPiece(__lowercase )

    @staticmethod
    def lowercase_ ( __lowercase ) -> str:
        # Identity hook: returns the input string unchanged.
        return out_string

    def lowercase_ ( self , __lowercase ) -> str:
        # Join sub-tokens back into text, decoding runs of ordinary tokens via
        # SentencePiece while passing special tokens through verbatim.
        lowerCAmelCase_ : str = []
        lowerCAmelCase_ : Union[str, Any] = ''''''
        lowerCAmelCase_ : str = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__lowercase ) + token
                lowerCAmelCase_ : Optional[Any] = True
                lowerCAmelCase_ : Dict = []
            else:
                current_sub_tokens.append(__lowercase )
                lowerCAmelCase_ : str = False
        out_string += self.sp_model.decode(__lowercase )
        return out_string

    def lowercase_ ( self ) -> Dict[str, int]:
        # Full token -> id map, including tokens added after training.
        lowerCAmelCase_ : List[Any] = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        # Save the SentencePiece model: copy the original file when available,
        # otherwise serialize the in-memory model proto.
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : int = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowercase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__lowercase , '''wb''' ) as fi:
                lowerCAmelCase_ : str = self.sp_model.serialized_model_proto()
                fi.write(__lowercase )
        return (out_vocab_file,)

    def lowercase_ ( self , __lowercase , __lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        # Fast encode: accepts a single string or a list of strings; optionally
        # returns a torch tensor when return_tensors is True or "pt".
        if isinstance(__lowercase , __lowercase ):
            lowerCAmelCase_ : Optional[Any] = self.preprocess_text(__lowercase )
            lowerCAmelCase_ : Optional[Any] = self.sp_model.encode(__lowercase )
        else:
            lowerCAmelCase_ : Any = [self.preprocess_text(__lowercase ) for t in text]
            lowerCAmelCase_ : List[str] = self.sp_model.encode(__lowercase )
        if return_tensors is True or return_tensors == "pt":
            lowerCAmelCase_ : int = torch.tensor(__lowercase )
        return token_ids

    def lowercase_ ( self , __lowercase ) -> str:
        # Fast decode: raw SentencePiece decode of a list of ids.
        return self.sp_model.decode(__lowercase )

    def lowercase_ ( self , __lowercase ) -> List[int]:
        # Build a chat prompt from a Conversation object:
        # eos + bos, turns joined by bos, terminated by "Bot:" to cue a reply.
        lowerCAmelCase_ : Optional[int] = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        lowerCAmelCase_ : List[str] = (
            f"""{self.eos_token}{self.bos_token}"""
            + f"""{self.bos_token}""".join(__lowercase )
            + f"""{self.bos_token}Bot:"""
        )
        return self.encode(text=__lowercase )
705
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

_UpperCAmelCase : Any =logging.get_logger(__name__)


class snake_case__( UpperCAmelCase__ ):
    """Image processor with shortest-edge resizing and crop_pct-style cropping.

    Pipeline: optional resize (below 384 px: scale shortest edge by
    1/crop_pct then center-crop; at/above 384: warp straight to square),
    optional 1/255 rescale, optional ImageNet-standard normalization.

    NOTE(review): methods are all named ``lowercase_`` and parameters
    ``__lowercase`` (with duplicates — a SyntaxError), and bodies reference
    names (``size``, ``crop_pct``, ``images``, ...) the signatures no longer
    bind — renaming damage from an obfuscation pass; code kept verbatim.
    """

    SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]

    def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
        super().__init__(**__lowercase )
        # Default target: shortest edge of 384 px.
        lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : List[Any] = do_resize
        lowerCAmelCase_ : Optional[int] = size
        # Default value set here for backwards compatibility where the value in config is None
        lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        lowerCAmelCase_ : Tuple = resample
        lowerCAmelCase_ : Optional[int] = do_rescale
        lowerCAmelCase_ : Any = rescale_factor
        lowerCAmelCase_ : List[str] = do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
        # Resize with crop_pct semantics (see class docstring).
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
            lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
            lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
        # Thin wrapper over the functional rescale (typically scale = 1/255).
        return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
        # Thin wrapper over the functional per-channel normalize.
        return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )

    def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
        # Full preprocessing entry point: resolve per-call overrides against the
        # instance defaults, validate, then apply resize -> rescale -> normalize.
        lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        lowerCAmelCase_ : str = resample if resample is not None else self.resample
        lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
        lowerCAmelCase_ : int = size if size is not None else self.size
        lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
        if not valid_images(__lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # `(do_resize and size is None) or resample is None` — probably meant
        # `do_resize and (size is None or resample is None)`; confirm upstream.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
        if do_resize:
            lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
        if do_rescale:
            lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
        if do_normalize:
            lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
        lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
        lowerCAmelCase_ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase )
619
0
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of non-negative ``num``.

    BUG FIX: the original (obfuscated) version bound the parameter and the
    accumulator to the same mangled names, so ``digit_sum`` and ``num`` were
    undefined and the function raised ``NameError``.
    """
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th
    convergent of the continued fraction of e.

    e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...] — every third partial quotient
    (1-indexed position i with i % 3 == 0) equals 2*i/3, the rest are 1.
    Numerators follow h_i = a_i * h_{i-1} + h_{i-2}.

    BUG FIX: the original defined both helpers under the same mangled name
    (the second shadowing the first) and referenced undefined locals; the
    canonical names (matching the ``solution()`` call in ``__main__``) are
    restored.
    """
    pre_numerator = 1   # h_{i-2}, seeded so the first convergent is 2/1
    cur_numerator = 2   # h_{i-1} = a_1 = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        # partial quotient a_i of e's continued fraction
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
706
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)

# Map of known checkpoints to their hosted config files.
_UpperCAmelCase : Union[str, Any] ={
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}


class snake_case__( UpperCAmelCase__ ):
    """Configuration class for GPT-NeoX-Japanese models.

    Stores the model hyper-parameters; defaults correspond to the
    abeja/gpt-neox-japanese-2.7b checkpoint.

    BUG FIX: the original ``__init__`` declared every parameter as
    ``__lowercase`` — duplicate argument names are a SyntaxError and the real
    keyword names callers pass were lost — and assigned each value to a local
    variable instead of an instance attribute, so the config stored nothing.
    Parameter names and ``self.*`` assignments are restored from the
    attribute names visible in the original body.
    """

    SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""

    def __init__(
        self,
        vocab_size=3_2_0_0_0,
        hidden_size=2_5_6_0,
        num_hidden_layers=3_2,
        num_attention_heads=3_2,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=1_0_0_0_0,
        max_position_embeddings=2_0_4_8,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=3_1_9_9_6,
        eos_token_id=3_1_9_9_9,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ) -> str:
        # bos/eos ids are consumed by the base PretrainedConfig constructor.
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # FFN width is expressed as a multiple of hidden_size, not an absolute size.
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        # Fraction of head dims that receive rotary embeddings, and their base.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
619
0
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class snake_case__:
    """Helper that builds tiny GPT-NeoX configs/inputs and runs shape checks.

    NOTE(review): all methods are named ``lowercase_`` (later defs shadow
    earlier ones at class-creation time), parameters are duplicated
    ``__lowercase`` names (a SyntaxError), and bodies reference names the
    signatures no longer bind — renaming damage from an obfuscation pass;
    code kept verbatim, only documented.
    """

    def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=6_4 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> int:
        # Store the miniature model/test hyper-parameters.
        lowerCAmelCase_ : Dict = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : str = seq_length
        lowerCAmelCase_ : Dict = is_training
        lowerCAmelCase_ : List[Any] = use_input_mask
        lowerCAmelCase_ : int = use_token_type_ids
        lowerCAmelCase_ : List[str] = use_labels
        lowerCAmelCase_ : Union[str, Any] = vocab_size
        lowerCAmelCase_ : str = hidden_size
        lowerCAmelCase_ : Any = num_hidden_layers
        lowerCAmelCase_ : int = num_attention_heads
        lowerCAmelCase_ : Any = intermediate_size
        lowerCAmelCase_ : int = hidden_act
        lowerCAmelCase_ : Dict = hidden_dropout_prob
        lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob
        lowerCAmelCase_ : str = max_position_embeddings
        lowerCAmelCase_ : int = type_vocab_size
        lowerCAmelCase_ : Union[str, Any] = type_sequence_label_size
        lowerCAmelCase_ : Union[str, Any] = initializer_range
        lowerCAmelCase_ : str = num_labels
        lowerCAmelCase_ : Union[str, Any] = num_choices
        lowerCAmelCase_ : Any = scope
        # pad token is the last vocabulary id
        lowerCAmelCase_ : List[str] = vocab_size - 1

    def lowercase_ ( self ) -> int:
        # Build random ids/mask/labels plus a config for one test run.
        lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase_ : List[Any] = None
        if self.use_input_mask:
            lowerCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase_ : Tuple = None
        if self.use_labels:
            lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        lowerCAmelCase_ : str = self.get_config()
        return config, input_ids, input_mask, token_labels

    def lowercase_ ( self ) -> Optional[int]:
        # Tiny GPTNeoXConfig mirroring the tester's hyper-parameters.
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )

    def lowercase_ ( self ) -> Any:
        # Same as prepare_config_and_inputs but marks the config as a decoder.
        lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase_ : str = True
        return config, input_ids, input_mask, token_labels

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> List[str]:
        # Base model forward: check last_hidden_state shape.
        lowerCAmelCase_ : str = GPTNeoXModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : List[Any] = model(__lowercase , attention_mask=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = model(__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> int:
        # Same shape check with the model configured as a decoder.
        lowerCAmelCase_ : Optional[int] = True
        lowerCAmelCase_ : List[Any] = GPTNeoXModel(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : int = model(__lowercase , attention_mask=__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
        # Causal-LM head: logits over the full vocabulary.
        lowerCAmelCase_ : Any = GPTNeoXForCausalLM(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        # QA head: per-token start/end logits.
        lowerCAmelCase_ : Optional[int] = self.num_labels
        lowerCAmelCase_ : Union[str, Any] = GPTNeoXForQuestionAnswering(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , attention_mask=__lowercase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
        # Sequence-classification head: one logit vector per example.
        lowerCAmelCase_ : Any = self.num_labels
        lowerCAmelCase_ : Union[str, Any] = GPTNeoXForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCAmelCase_ : Any = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
        # Token-classification head: per-token label logits.
        lowerCAmelCase_ : Union[str, Any] = self.num_labels
        lowerCAmelCase_ : Union[str, Any] = GPTNeoXForTokenClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
        # KV-cache consistency: outputs with and without past_key_values must
        # agree on the newly generated positions.
        lowerCAmelCase_ : Any = True
        lowerCAmelCase_ : List[Any] = GPTNeoXForCausalLM(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        # first forward pass
        lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , use_cache=__lowercase )
        lowerCAmelCase_ : List[str] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowerCAmelCase_ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCAmelCase_ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
        lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , output_hidden_states=__lowercase )
        lowerCAmelCase_ : Any = output_from_no_past['''hidden_states'''][0]
        lowerCAmelCase_ : Union[str, Any] = model(
            __lowercase , attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )['''hidden_states'''][0]
        # select random slice
        lowerCAmelCase_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCAmelCase_ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCAmelCase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-3 ) )

    def lowercase_ ( self ) -> Union[str, Any]:
        # Pack config and inputs into the dict shape the common tests expect.
        lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = config_and_inputs
        lowerCAmelCase_ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common ModelTester/Pipeline test suite for GPT-NeoX.

    NOTE(review): the base mixins and the tester class referenced in setUp
    (``GPTNeoXModelTester``) were renamed away by the obfuscation pass and are
    undefined in this file — confirm against the original.
    """

    SCREAMING_SNAKE_CASE__ : Tuple = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ : str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Optional[Any] = (
        {
            """feature-extraction""": GPTNeoXModel,
            """question-answering""": GPTNeoXForQuestionAnswering,
            """text-classification""": GPTNeoXForSequenceClassification,
            """text-generation""": GPTNeoXForCausalLM,
            """token-classification""": GPTNeoXForTokenClassification,
            """zero-shot""": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : List[str] = False
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE__ : List[str] = False

    def lowercase_ ( self ) -> Any:
        # setUp: tiny model tester + config tester.
        lowerCAmelCase_ : Any = GPTNeoXModelTester(self )
        lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=__lowercase , hidden_size=6_4 , num_attention_heads=8 )

    def lowercase_ ( self ) -> int:
        self.config_tester.run_common_tests()

    def lowercase_ ( self ) -> Tuple:
        lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(__lowercase , __lowercase , __lowercase )

    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(__lowercase , __lowercase , __lowercase )

    def lowercase_ ( self ) -> Dict:
        # This regression test was failing with PyTorch < 1.3
        lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
        lowerCAmelCase_ : Dict = None
        self.model_tester.create_and_check_model_as_decoder(__lowercase , __lowercase , __lowercase )

    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowercase , __lowercase , __lowercase )

    def lowercase_ ( self ) -> Tuple:
        lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*__lowercase )

    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__lowercase )

    def lowercase_ ( self ) -> Tuple:
        lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__lowercase )

    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowercase )

    @unittest.skip(reason='''Feed forward chunking is not implemented''' )
    def lowercase_ ( self ) -> Optional[int]:
        pass

    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def lowercase_ ( self , __lowercase ) -> Tuple:
        # RoPE scaling: compare a baseline model against a scaled-RoPE model on
        # a short and a longer-than-trained input.
        lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ : Optional[Any] = ids_tensor([1, 1_0] , config.vocab_size )
        lowerCAmelCase_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        lowerCAmelCase_ : int = GPTNeoXModel(__lowercase )
        original_model.to(__lowercase )
        original_model.eval()
        lowerCAmelCase_ : List[Any] = original_model(__lowercase ).last_hidden_state
        lowerCAmelCase_ : Tuple = original_model(__lowercase ).last_hidden_state
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        lowerCAmelCase_ : List[str] = {'''type''': scaling_type, '''factor''': 10.0}
        lowerCAmelCase_ : Optional[Any] = GPTNeoXModel(__lowercase )
        scaled_model.to(__lowercase )
        scaled_model.eval()
        lowerCAmelCase_ : List[Any] = scaled_model(__lowercase ).last_hidden_state
        lowerCAmelCase_ : List[str] = scaled_model(__lowercase ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )


@require_torch
class snake_case__( unittest.TestCase ):
    """Slow integration test: greedy generation with a real Pythia checkpoint."""

    @slow
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
        # Run once with and once without gradient checkpointing; output must match.
        for checkpointing in [True, False]:
            lowerCAmelCase_ : Tuple = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(__lowercase )
            lowerCAmelCase_ : List[str] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowercase )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            lowerCAmelCase_ : List[str] = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
            lowerCAmelCase_ : Optional[Any] = model.generate(**__lowercase , do_sample=__lowercase , max_new_tokens=2_0 )
            lowerCAmelCase_ : List[str] = tokenizer.batch_decode(__lowercase )[0]
            self.assertEqual(__lowercase , __lowercase )
707
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class snake_case__: '''simple docstring''' def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]: lowerCAmelCase_ : str = parent lowerCAmelCase_ : Optional[Any] = batch_size lowerCAmelCase_ : List[Any] = is_training lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss lowerCAmelCase_ : List[Any] = num_queries lowerCAmelCase_ : str = num_channels lowerCAmelCase_ : Dict = min_size lowerCAmelCase_ : List[str] = max_size lowerCAmelCase_ : Any = num_labels lowerCAmelCase_ : str = mask_feature_size def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowercase ) lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase ) lowerCAmelCase_ : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5 ).float() lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long() 
lowerCAmelCase_ : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> List[str]: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase_ ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs() lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def lowercase_ ( self , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int: with torch.no_grad(): lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase ) model.to(__lowercase ) model.eval() lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, 
self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowercase , __lowercase ) def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any: lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase ) model.to(__lowercase ) model.eval() def comm_check_on_output(__lowercase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase ) lowerCAmelCase_ : Any = model(__lowercase ) comm_check_on_output(__lowercase ) lowerCAmelCase_ : List[Any] = model( pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) comm_check_on_output(__lowercase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Tuple 
= ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : List[str] = False def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Any = MaskFormerModelTester(self ) lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def lowercase_ ( self ) -> Any: self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def lowercase_ ( self ) -> str: pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def lowercase_ ( self ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowercase_ ( self ) -> Optional[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowercase_ ( self ) -> Dict: pass def lowercase_ ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowerCAmelCase_ : Tuple = model_class(__lowercase ) lowerCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : str = [*signature.parameters.keys()] lowerCAmelCase_ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) @slow def lowercase_ ( self ) -> Optional[int]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def lowercase_ ( self ) -> List[Any]: lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2 lowerCAmelCase_ : List[Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(), } lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase ) lowerCAmelCase_ : Dict = model(**__lowercase ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase ) def lowercase_ ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase ) lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> List[str]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : int = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ 
, lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Optional[Any] = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss loss.backward() def lowercase_ ( self ) -> Optional[int]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase_ : Any = self.all_model_classes[1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Any = model_class(__lowercase ) model.to(__lowercase ) model.train() lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ) lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowercase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _UpperCAmelCase : Dict =1E-4 def lowerCAmelCase ( )-> Any: lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class snake_case__( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ) -> Union[str, Any]: return ( 
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def lowercase_ ( self ) -> Any: lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase ) lowerCAmelCase_ : Dict = self.default_image_processor lowerCAmelCase_ : int = prepare_img() lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : List[str] = model(**__lowercase ) lowerCAmelCase_ : Union[str, Any] = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : List[Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) lowerCAmelCase_ : int = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Dict: lowerCAmelCase_ : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : Tuple = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' 
).to(__lowercase ) lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : Dict = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Tuple = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : Dict = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : int = self.default_image_processor lowerCAmelCase_ : Optional[Any] = prepare_img() lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size 
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) # masks_queries_logits lowerCAmelCase_ : List[str] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) ) # class_queries_logits lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase_ : int = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) ) def lowercase_ ( self ) -> Optional[Any]: lowerCAmelCase_ : Dict = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowercase ) .eval() ) lowerCAmelCase_ : str = self.default_image_processor lowerCAmelCase_ : Union[str, Any] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase ) lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']] lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']] with torch.no_grad(): lowerCAmelCase_ : str = model(**__lowercase ) self.assertTrue(outputs.loss is not None )
619
0
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by throwing random dots at the unit square and counting
    how many land inside the inscribed quarter/unit circle.

    Prints the estimate, the reference value and the absolute error.
    NOTE: original functions were all mangled to one name, shadowing each
    other; distinct names are restored here.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""")
    print(f"""The numpy value of pi is {pi}""")
    print(f"""The total error is {abs(pi - pi_estimate)}""")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of ``function_to_integrate`` over
    ``[min_value, max_value]``: the mean of the function at uniformly random
    sample points, scaled by the interval length.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value))
        for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator on y = x, whose exact integral is
    (max^2 - min^2) / 2, and print the estimation error.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("""******************""")
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {expected_value}""")
    print(f"""Total error is {abs(estimated_value - expected_value)}""")
    print("""******************""")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under y = sqrt(4 - x^2) on [0, 2] (a quarter
    of a circle of radius 2) and print the estimation error.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("""******************""")
    print("""Estimating pi using area_under_curve_estimator""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {pi}""")
    print(f"""Total error is {abs(estimated_value - pi)}""")
    print("""******************""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
708
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


# Task template for automatic-speech-recognition datasets: one input audio
# column mapped to one output transcription column.
# NOTE(review): `UpperCAmelCase__` (used as the `frozen=` flag and as the base
# class) is not defined anywhere in this file — it looks like a mangled
# placeholder for `True` / `TaskTemplate`; confirm against the upstream source.
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__( UpperCAmelCase__ ):
    '''simple docstring'''

    # NOTE(review): all class attributes below share the mangled name
    # `SCREAMING_SNAKE_CASE__`, so each statement shadows the previous one and
    # only the last survives; presumably these were originally distinct fields
    # (task, input_schema, label_schema, audio_column, transcription_column).
    SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    SCREAMING_SNAKE_CASE__ : str = "audio"
    SCREAMING_SNAKE_CASE__ : str = "transcription"

    def lowercase_ ( self , __lowercase ) -> int:
        # Validate that the dataset's features include the configured audio
        # column, then return a copy of this template whose input schema uses
        # the dataset's own Audio feature.
        # NOTE(review): the body reads `features`, `input_schema` and
        # `task_template`, but every assignment targets the throwaway name
        # `lowerCAmelCase_` — as written this raises NameError; the parameter
        # `__lowercase` is presumably `features`, and the isinstance check was
        # presumably against `Audio`, not the parameter itself. Confirm upstream.
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , __lowercase ):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
        lowerCAmelCase_ : List[str] = copy.deepcopy(self )
        lowerCAmelCase_ : Optional[Any] = self.input_schema.copy()
        lowerCAmelCase_ : Optional[Any] = features[self.audio_column]
        lowerCAmelCase_ : List[str] = input_schema
        return task_template

    @property
    def lowercase_ ( self ) -> Dict[str, str]:
        # Map the configured dataset columns to the canonical task column names
        # ("audio" -> input, "transcription" -> target).
        # NOTE(review): this property name collides with the method above
        # (originally `align_with_features` / `column_mapping`), so the method
        # is shadowed at class-creation time.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
619
0
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if ``positive_integer`` is a perfect partition value,
    i.e. ``sqrt(4 * positive_integer + 1) / 2 + 1 / 2`` is a power of two
    (Project Euler problem 207).
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the smallest partition value ``m`` for which the proportion of
    perfect partitions ``P(m)`` first falls below ``max_proportion``.

    NOTE: both functions were mangled to one name in the original dump, so the
    internal call to ``check_partition_perfect`` raised NameError; distinct
    names are restored here.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        # Valid partition values m satisfy 4*m + 1 = integer**2 (odd squares),
        # i.e. m = (integer**2 - 1) / 4.
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"""{solution() = }""")
709
# Parameter-name sets used to drive shared pipeline tests: each frozenset lists
# the call arguments a given pipeline kind accepts ("*_PARAMS") or the subset
# that is batchable ("*_BATCH_PARAMS").
# NOTE(review): every assignment below targets the same mangled name
# `_UpperCAmelCase`, so each statement shadows the previous one and only the
# last frozenset survives at runtime — presumably these originally had distinct
# names (TEXT_TO_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, ...); confirm
# against the upstream file.

# text-to-image generation arguments
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
# batchable text-to-image arguments
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
# image inputs for text-to-image (none)
_UpperCAmelCase : Dict =frozenset([])
# image inputs for image-to-image
_UpperCAmelCase : int =frozenset(["""image"""])
# unconditional image-variation arguments
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
# batchable image-variation arguments
_UpperCAmelCase : int =frozenset(["""image"""])
# text-guided image-variation arguments
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
# batchable text-guided image-variation arguments
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
# text-guided inpainting arguments
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
# batchable text-guided inpainting arguments
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
# unconditional inpainting arguments
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
# batchable unconditional inpainting arguments
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# example-image-guided inpainting arguments
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
# batchable example-image-guided inpainting arguments
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# class-conditioned image generation arguments / batchable arguments
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# unconditional image generation arguments / batchable arguments
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
# unconditional audio generation arguments / batchable arguments
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# text-to-audio generation arguments
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
# batchable text-to-audio arguments
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# tokens-to-audio generation arguments / batchable arguments
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
619
0
import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # CLI: read a binarized (pickled) token-id dataset and dump per-token
    # occurrence counts, used for smoothing MLM masking probabilities.
    # FIX: in the mangled original every assignment targeted `_UpperCAmelCase`
    # while later statements read `parser`/`args`/`data`/`counter`/`counts`
    # (NameError), and the count-vector fill had lost its `counts[k]` target.
    parser = argparse.ArgumentParser(
        description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
    )
    parser.add_argument(
        """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
    )
    parser.add_argument(
        """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
    )
    parser.add_argument("""--vocab_size""", default=3_0522, type=int)
    args = parser.parse_args()

    logger.info(f"""Loading data from {args.data_file}""")
    # SECURITY: pickle.load executes arbitrary code if the file is untrusted —
    # only run this on dumps you produced yourself.
    with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)

    logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense count vector indexed by token id; ids never seen keep count 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, """wb""") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
710
def solution(n: int = 1_000_000) -> int:
    """Return the starting number below ``n`` that produces the longest
    Collatz chain (Project Euler problem 14).

    Chain lengths are memoized in ``counters`` so each number's tail is
    computed only once.
    FIX: the mangled original collapsed the parameter and all locals into one
    name (so ``range(2, <dict>)`` raised TypeError and ``counters``/``number``
    were undefined), and the main guard called an undefined ``solution``.
    """
    largest_number = 1  # best starting number found so far
    pre_counter = 1  # chain length of ``largest_number``
    counters = {1: 1}  # memo: starting number -> chain length

    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                # Rest of the chain is already known.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
619
0
class snake_case__:
    """Prefix-sum index over a list of integers.

    ``prefix_sum[i]`` holds ``array[0] + ... + array[i]``, so any contiguous
    range sum is answered in O(1).

    FIX: in the mangled original both query methods were named ``lowercase_``
    so the second shadowed the first, destroying the range-sum API; they are
    restored here as ``get_sum`` and ``contains_sum``.
    """

    def __init__(self, array: list[int]) -> None:
        """Precompute cumulative sums of ``array`` in O(n)."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of ``array[start..end]`` (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to ``target_sum``.

        Uses the identity: a subarray sums to t iff two prefix sums (or a
        prefix sum and 0) differ by t.
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
711
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger.
# NOTE(review): the mangled name `_UpperCAmelCase` is never read back — later
# code refers to `logger`, which is undefined as written.
_UpperCAmelCase : str =logging.get_logger(__name__)


# Composite configuration tying together an encoder config and a decoder
# config (cf. transformers' EncoderDecoderConfig).
# NOTE(review): `UpperCAmelCase__` as the base class is undefined in this
# file; presumably it stands for `PretrainedConfig` — confirm upstream.
class snake_case__( UpperCAmelCase__ ):
    '''simple docstring'''

    # NOTE(review): both class attributes share the mangled name
    # `SCREAMING_SNAKE_CASE__`, so the second shadows the first; originally
    # presumably `model_type = "encoder-decoder"` and `is_composition = True`.
    SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder"""
    SCREAMING_SNAKE_CASE__ : str = True

    def __init__( self , **__lowercase ) -> Union[str, Any]:
        # Build nested encoder/decoder configs from the "encoder"/"decoder"
        # entries of the keyword arguments via AutoConfig.
        # NOTE(review): the assertion reads `kwargs`, but the parameter is
        # mangled to `__lowercase`; likewise the body reads
        # `encoder_config`/`decoder_config` while assigning to the throwaway
        # `lowerCAmelCase_` — as written this raises NameError at call time.
        super().__init__(**__lowercase )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        lowerCAmelCase_ : str = kwargs.pop('''encoder''' )
        lowerCAmelCase_ : int = encoder_config.pop('''model_type''' )
        lowerCAmelCase_ : Optional[Any] = kwargs.pop('''decoder''' )
        lowerCAmelCase_ : Optional[Any] = decoder_config.pop('''model_type''' )
        # Imported lazily, presumably to avoid a circular import with the
        # auto-config module.
        from ..auto.configuration_auto import AutoConfig
        lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(__lowercase , **__lowercase )
        lowerCAmelCase_ : List[str] = AutoConfig.for_model(__lowercase , **__lowercase )
        lowerCAmelCase_ : Any = True

    @classmethod
    def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> PretrainedConfig:
        # Alternate constructor: build a combined config from separate encoder
        # and decoder configs, forcing the decoder into cross-attention mode.
        # NOTE(review): reads `encoder_config`/`decoder_config`, which the
        # mangled parameter names (`__lowercase`) no longer provide.
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        lowerCAmelCase_ : int = True
        lowerCAmelCase_ : List[Any] = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowercase )

    def lowercase_ ( self ) -> Any:
        # Serialize this config (and the nested encoder/decoder configs) to a
        # plain dict.
        # NOTE(review): returns `output`, but every assignment targets the
        # throwaway `lowerCAmelCase_`; this method name also collides with the
        # classmethod above (originally `from_encoder_decoder_configs` /
        # `to_dict`), so the classmethod is shadowed at class-creation time.
        lowerCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
        lowerCAmelCase_ : List[str] = self.encoder.to_dict()
        lowerCAmelCase_ : Dict = self.decoder.to_dict()
        lowerCAmelCase_ : Optional[Any] = self.__class__.model_type
        return output
619
0
'''simple docstring'''
from __future__ import annotations


class Matrix:
    """A dense integer/float matrix with determinant, inverse, row/column
    editing and operator overloads.

    FIX: the mangled original named the class ``snake_case__`` while the body
    constructs ``Matrix(...)`` and calls ``Matrix.dot_product`` (NameError),
    and every method collided on the name ``lowercase_``; the names used by
    the internal references are restored here.
    """

    def __init__(self, rows: list[list[int]]) -> None:
        """Validate and store ``rows``; [] produces an empty matrix."""
        error = TypeError(
            '''Matrices must be formed from a list of zero or more lists containing at '''
            '''least one and the same number of values, each of which must be of type '''
            '''int or float.'''
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        """Return the matrix columns as a list of lists (transpose view)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(rows, columns) shape tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Determinant via cofactor expansion along the first row.

        Non-square matrices return 0; the empty matrix returns 1.
        """
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with ``row``/``column`` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        # Hoisted: the original recomputed self.minors() for every element.
        minors = self.minors()
        return Matrix(
            [
                [
                    minors.rows[row][column]
                    if (row + column) % 2 == 0
                    else minors.rows[row][column] * -1
                    for column in range(minors.num_columns)
                ]
                for row in range(minors.num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('''Only matrices with a non-zero determinant have an inverse''')
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '''[''' + '''. '''.join([str(value) for value in row]) + '''.]'''
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append ``row`` (or insert at ``position``) after validation."""
        type_error = TypeError('''Row must be a list containing all ints and/or floats''')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                '''Row must be equal in length to the other rows in the matrix'''
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append ``column`` (or insert at ``position``) after validation."""
        type_error = TypeError(
            '''Column must be a list containing all ints and/or floats'''
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                '''Column must be equal in length to the other columns in the matrix'''
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('''Addition requires matrices of the same order''')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('''Subtraction requires matrices of the same order''')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            # NOTE: scalar results are truncated to int, matching the
            # original behavior.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    '''The number of columns in the first matrix must '''
                    '''be equal to the number of rows in the second'''
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                '''A Matrix can only be multiplied by an int, float, or another matrix'''
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError('''A Matrix can only be raised to the power of an int''')
        if not self.is_square:
            raise ValueError('''Only square matrices can be raised to a power''')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                '''Only invertable matrices can be raised to a negative power'''
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
712
from __future__ import annotations from math import pi def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]: if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if inductance < 0: raise ValueError('''Inductance cannot be negative''' ) if frequency < 0: raise ValueError('''Frequency cannot be negative''' ) if reactance < 0: raise ValueError('''Inductive reactance cannot be negative''' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
619
0
def lowerCAmelCase ( lowerCAmelCase_ = 50 )-> int: lowerCAmelCase_ : List[str] = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f"""{solution() = }""")
713
"""Learning-rate schedule factories, each returning a torch `LambdaLR`."""
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names of the schedules `get_scheduler` can build."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1
):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(
    optimizer: Optimizer, step_rules: str, last_epoch: int = -1
):
    """Piecewise-constant schedule described by a rule string.

    `step_rules` looks like "1:10,0.1:20,0.01:30,0.005": multiplier 1 until
    step 10, 0.1 until step 20, 0.01 until step 30, then 0.005 thereafter.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, last_epoch=-1
):
    """Linear warmup followed by a linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0,
            float(num_training_steps - current_step)
            / float(max(1, num_training_steps - num_warmup_steps)),
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
):
    """Linear warmup then cosine decay; `num_cycles` waves over the decay phase."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(
            max(1, num_training_steps - num_warmup_steps)
        )
        return max(
            0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: int = 1,
    last_epoch: int = -1,
):
    """Linear warmup then cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(
            max(1, num_training_steps - num_warmup_steps)
        )
        if progress >= 1.0:
            return 0.0
        return max(
            0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer,
    num_warmup_steps,
    num_training_steps,
    lr_end=1e-7,
    power=1.0,
    last_epoch=-1,
):
    """Linear warmup then polynomial decay from the optimizer's initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(
            f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})"""
        )

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory dispatching on `name` to one of the schedules above.

    Raises:
        ValueError: if a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(
            f"""{name} requires `num_warmup_steps`, please provide that argument."""
        )

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch
        )

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(
            f"""{name} requires `num_training_steps`, please provide that argument."""
        )

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        last_epoch=last_epoch,
    )
619
0
"""Tokenizer tests for LXMERT (BERT-style WordPiece vocabulary)."""
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a tiny WordPiece vocab file for the tokenizer under test."""
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        """Raw input and its expected detokenized form for round-trip tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        # Lowercasing + accent stripping + WordPiece splitting.
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        # The slow (Python) and fast (Rust) tokenizers must agree exactly.
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
714
from __future__ import annotations def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative in a semiconductor''' ) elif hole_conc < 0: raise ValueError('''Hole concentration cannot be negative in a semiconductor''' ) elif intrinsic_conc < 0: raise ValueError( '''Intrinsic concentration cannot be negative in a semiconductor''' ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
619
0
from abc import ABC, abstractmethod from argparse import ArgumentParser class snake_case__( UpperCAmelCase__ ): '''simple docstring''' @staticmethod @abstractmethod def lowercase_ ( __lowercase ) -> Optional[int]: raise NotImplementedError() @abstractmethod def lowercase_ ( self ) -> int: raise NotImplementedError()
715
"""Checks that every model config class's docstring mentions a valid checkpoint."""
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempted from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name in `config_class`'s source whose
    markdown link matches `https://huggingface.co/<name>`, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-exempt config class without a valid checkpoint."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(
            f"""The following configurations don't contain any valid checkpoint:\n{message}"""
        )


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
619
0
"""CLIP tokenizer extension mapping one placeholder token to several learned vectors."""
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps a placeholder token to the list of concrete tokens it expands to.
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add `placeholder_token` to the vocab; raise if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register `placeholder_token`, expanding it to `num_vec_per_token` vocab entries."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}keep placeholder tokens independent"""
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder occurring in `text` (or list of texts)."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(
                    self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle)
                )
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # Optionally load only a leading fraction of the vectors.
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
716
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """Wraps a torch LR scheduler so it only steps when its optimizers really stepped,
    and steps once per process when the dataloader batch size was scaled up."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs delegating to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
619
0
"""SEW model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    """Configuration for a SEW speech model; defaults follow asapp/sew-tiny-100k."""

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three convolutional feature-extractor specs must describe the same
        # number of layers or the model cannot be built.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
717
from manim import *


class snake_case__( UpperCAmelCase__ ):
    """Manim scene animating how checkpoint shards are written to disk-backed
    storage and then garbage-collected from CPU memory.

    NOTE(review): this block appears machine-mangled — ``lowerCAmelCase_`` is
    re-bound for every value while later statements reference names such as
    ``mem``/``cpu``/``gpu``/``model`` that are never defined, and most
    positional arguments (manim direction constants, mobjects) were replaced
    by ``__lowercase``.  The code is kept byte-identical; restoring it needs
    the original manim script — TODO confirm against upstream.
    """

    def lowercase_ ( self ) -> Tuple:  # presumably manim's `construct` — TODO confirm
        # --- base cell shapes: full-size memory cell, meta-device cell, translucent fill ---
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # --- CPU block: two columns of 6 cells plus a label ---
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # --- GPU block: 4 cells plus a label ---
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # --- model block: 6 cells plus a label ---
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        # Place one small translucent fill target per model cell onto the CPU columns.
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # --- loaded-checkpoint block: 6 cells plus a label ---
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        # Mirror each checkpoint cell onto a CPU cell (first five to the left column,
        # the rest to the right column).
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # --- legend ---
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # --- step 1 caption and the disk block (two columns of meta-device cells) ---
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        # Animate each checkpoint shard shrinking onto its disk cell.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # --- step 2 caption: garbage collection, then fade everything out ---
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
619
0
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the block of text in `filename` delimited by `start_prompt` and `end_prompt`.

    Returns a tuple `(text, start_index, end_index, lines)` where `lines` is the full
    list of file lines and the two indices delimit the extracted region (exclusive of
    the prompt lines and of surrounding blank lines).
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    # Now go until the end prompt.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Strip leading/trailing blank lines (a "line" of length <= 1 is just "\n").
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map of task-guide doc file -> the auto-model mapping listing the architectures it supports.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return a Markdown-formatted, comma-separated list of model links for `task_guide`."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check that the auto-generated model list in `task_guide` is up to date.

    Raises ValueError when the list is stale, unless `overwrite` is True, in which case
    the guide file is rewritten in place with the fresh list.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
718
_UpperCAmelCase : Dict =[ (1000, """M"""), (900, """CM"""), (500, """D"""), (400, """CD"""), (100, """C"""), (90, """XC"""), (50, """L"""), (40, """XL"""), (10, """X"""), (9, """IX"""), (5, """V"""), (4, """IV"""), (1, """I"""), ] def lowerCAmelCase ( lowerCAmelCase_ )-> int: lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000} lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : List[str] = 0 while place < len(lowerCAmelCase_ ): if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowerCAmelCase ( lowerCAmelCase_ )-> str: lowerCAmelCase_ : List[Any] = [] for arabic, roman in ROMAN: ((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ ) result.append(roman * factor ) if number == 0: break return "".join(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
619
0
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
#
# NOTE(review): this module has been mechanically obfuscated — every function is named
# `lowerCAmelCase` (each later def shadows the earlier ones) and every local binding is
# an annotated `lowerCAmelCase_` throwaway, while the bodies still reference the
# original names (`manager`, `result`, `p`, `create_tempdir`, ...). As written it does
# not run; the code is kept byte-identical here and the comments describe the apparent
# intent of the original human-eval source. Restore real names before use.
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


# Apparent intent: check_correctness(check_program, timeout, task_id, completion_id).
# Runs the candidate program in a child process with a hard timeout and reports a
# pass/fail record. NOTE(review): duplicate parameter names make this def a
# SyntaxError in the obfuscated form; `List` in the return annotation is also unbound.
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
    # Shared list so the child process can report its outcome back to us.
    lowerCAmelCase_ : int = multiprocessing.Manager()
    lowerCAmelCase_ : Optional[int] = manager.list()
    lowerCAmelCase_ : Union[str, Any] = multiprocessing.Process(target=lowerCAmelCase_ , args=(check_program, result, timeout) )
    p.start()
    # Give the child slightly longer than its own internal timeout before killing it.
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    # Empty result means the child never got to report: treat as a timeout.
    if not result:
        result.append('''timed out''' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


# Apparent intent: unsafe_execute(check_program, result, timeout) — exec() the untrusted
# program inside a temp dir with destructive builtins disabled. SECURITY: even with
# reliability_guard this is NOT a real sandbox; only run trusted or containerized code.
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        # Save the originals before reliability_guard() nulls them out, so the
        # tempdir context manager can still clean up afterwards.
        lowerCAmelCase_ : List[Any] = shutil.rmtree
        lowerCAmelCase_ : List[Any] = os.rmdir
        lowerCAmelCase_ : int = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            lowerCAmelCase_ : Tuple = {}
            with swallow_io():
                with time_limit(lowerCAmelCase_ ):
                    exec(lowerCAmelCase_ , lowerCAmelCase_ )
            result.append('''passed''' )
        except TimeoutException:
            result.append('''timed out''' )
        except BaseException as e:
            result.append(f"""failed: {e}""" )
        # Needed for cleaning up: restore the saved functions.
        lowerCAmelCase_ : Optional[Any] = rmtree
        lowerCAmelCase_ : Any = rmdir
        lowerCAmelCase_ : Optional[int] = chdir


# Apparent intent: time_limit(seconds) — raise TimeoutException via SIGALRM after the
# given number of seconds. Unix-only (ITIMER_REAL/SIGALRM) and main-thread-only.
@contextlib.contextmanager
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
    def signal_handler(lowerCAmelCase_ , lowerCAmelCase_ ):
        raise TimeoutException('''Timed out!''' )

    signal.setitimer(signal.ITIMER_REAL , lowerCAmelCase_ )
    signal.signal(signal.SIGALRM , lowerCAmelCase_ )
    try:
        yield
    finally:
        # Always cancel the timer so it cannot fire after the block exits.
        signal.setitimer(signal.ITIMER_REAL , 0 )


# Apparent intent: swallow_io() — silence stdout/stderr and block reads from stdin
# while the wrapped code runs.
@contextlib.contextmanager
def lowerCAmelCase ( )-> Any:
    lowerCAmelCase_ : Tuple = WriteOnlyStringIO()
    with contextlib.redirect_stdout(lowerCAmelCase_ ):
        with contextlib.redirect_stderr(lowerCAmelCase_ ):
            with redirect_stdin(lowerCAmelCase_ ):
                yield


# Apparent intent: create_tempdir() — run the wrapped code with a fresh temporary
# directory as the working directory; the directory is removed on exit.
@contextlib.contextmanager
def lowerCAmelCase ( )-> Optional[Any]:
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(lowerCAmelCase_ ):
            yield dirname


# Apparent intent: TimeoutException, raised by the SIGALRM handler above.
# NOTE(review): the base class `UpperCAmelCase__` is unbound in this chunk.
class snake_case__( UpperCAmelCase__ ):
    '''Exception raised when the executed program exceeds its time limit.'''
    pass


# Apparent intent: WriteOnlyStringIO — a StringIO whose read methods all raise, used
# as the stdin replacement so the tested program cannot block waiting for input.
class snake_case__( io.StringIO ):
    '''StringIO that can be written to but not read from.'''

    def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
        raise OSError

    def lowercase_ ( self , *__lowercase , **__lowercase ) -> List[Any]:
        raise OSError

    def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
        raise OSError

    def lowercase_ ( self , *__lowercase , **__lowercase ) -> str:
        # Declares the stream non-readable (original was `readable`).
        return False


# Apparent intent: redirect_stdin — contextlib's redirect machinery pointed at stdin.
class snake_case__( contextlib._RedirectStream ):  # type: ignore
    '''Context manager redirecting sys.stdin, mirroring redirect_stdout/stderr.'''
    SCREAMING_SNAKE_CASE__ : Tuple = """stdin"""


# Apparent intent: chdir(root) — temporarily change the working directory, always
# restoring the previous one (even when the wrapped code raises).
@contextlib.contextmanager
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]:
    if root == ".":
        yield
        return
    lowerCAmelCase_ : Dict = os.getcwd()
    os.chdir(lowerCAmelCase_ )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(lowerCAmelCase_ )


# Apparent intent: reliability_guard(maximum_memory_bytes=None) — cap memory via
# rlimits and null out destructive stdlib entry points (os.kill, shutil.rmtree,
# subprocess.Popen, ...) before exec()ing untrusted code. NOTE(review): the
# obfuscation reduced every `module.attr = None` monkey-patch to a bare local
# binding, so the original attribute targets are lost here; this listing preserves
# the statement order only. This is a best-effort guard, not a security boundary.
def lowerCAmelCase ( lowerCAmelCase_=None )-> Any:
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        # macOS does not allow raising RLIMIT_STACK this way.
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins

    lowerCAmelCase_ : Optional[int] = None
    lowerCAmelCase_ : List[str] = None
    import os

    # Original set OMP_NUM_THREADS to "1" here; the remaining bindings nulled os.* calls.
    lowerCAmelCase_ : Tuple = '''1'''
    lowerCAmelCase_ : Optional[Any] = None
    lowerCAmelCase_ : int = None
    lowerCAmelCase_ : List[Any] = None
    lowerCAmelCase_ : Dict = None
    lowerCAmelCase_ : Union[str, Any] = None
    lowerCAmelCase_ : Optional[Any] = None
    lowerCAmelCase_ : Optional[int] = None
    lowerCAmelCase_ : Tuple = None
    lowerCAmelCase_ : int = None
    lowerCAmelCase_ : Any = None
    lowerCAmelCase_ : Union[str, Any] = None
    lowerCAmelCase_ : List[Any] = None
    lowerCAmelCase_ : List[str] = None
    lowerCAmelCase_ : Optional[int] = None
    lowerCAmelCase_ : Optional[int] = None
    lowerCAmelCase_ : Optional[int] = None
    lowerCAmelCase_ : Dict = None
    lowerCAmelCase_ : Any = None
    lowerCAmelCase_ : List[Any] = None
    lowerCAmelCase_ : Optional[Any] = None
    lowerCAmelCase_ : Optional[Any] = None
    lowerCAmelCase_ : Any = None
    lowerCAmelCase_ : Any = None
    lowerCAmelCase_ : int = None
    lowerCAmelCase_ : Tuple = None
    lowerCAmelCase_ : Any = None
    lowerCAmelCase_ : Dict = None
    import shutil

    lowerCAmelCase_ : Optional[int] = None
    lowerCAmelCase_ : Union[str, Any] = None
    lowerCAmelCase_ : List[str] = None
    import subprocess

    lowerCAmelCase_ : Union[str, Any] = None  # type: ignore
    lowerCAmelCase_ : int = None
    import sys

    lowerCAmelCase_ : Union[str, Any] = None
    lowerCAmelCase_ : List[str] = None
    lowerCAmelCase_ : Dict = None
    lowerCAmelCase_ : Optional[int] = None
    lowerCAmelCase_ : Optional[Any] = None
719
import csv import tweepy # Twitter API credentials _UpperCAmelCase : int ="""""" _UpperCAmelCase : Optional[int] ="""""" _UpperCAmelCase : Dict ="""""" _UpperCAmelCase : str ="""""" def lowerCAmelCase ( lowerCAmelCase_ )-> None: # authorize twitter, initialize tweepy lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ ) auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ ) # initialize a list to hold all the tweepy Tweets lowerCAmelCase_ : Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # save the id of the oldest tweet less one lowerCAmelCase_ : str = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowerCAmelCase_ ) > 0: print(f"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates lowerCAmelCase_ : Optional[Any] = api.user_timeline( screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ ) # save most recent tweets alltweets.extend(lowerCAmelCase_ ) # update the id of the oldest tweet less one lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1 print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(lowerCAmelCase_ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
619
0
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


# Path to the shared SentencePiece test fixture.
_UpperCAmelCase : List[Any] = get_tests_dir("""fixtures/spiece.model""")


# NOTE(review): this class has been mechanically obfuscated — the class is named
# `snake_case__`, the base `UpperCAmelCase__` is unbound (presumably
# TokenizerTesterMixin), every test method is named `lowercase_` (later defs shadow
# earlier ones) and locals are bound to `lowerCAmelCase_` while the bodies still use
# the original names (`tokenizer`, `tokens`, ...). Kept byte-identical; comments
# describe the apparent intent.
@require_sentencepiece
@require_tokenizers
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    '''Tokenization tests for the ALBERT slow and fast tokenizers.'''

    SCREAMING_SNAKE_CASE__ : Optional[int] = AlbertTokenizer
    SCREAMING_SNAKE_CASE__ : List[str] = AlbertTokenizerFast
    SCREAMING_SNAKE_CASE__ : Dict = True
    SCREAMING_SNAKE_CASE__ : Dict = True
    SCREAMING_SNAKE_CASE__ : List[str] = True

    # Apparent intent: setUp — build a tokenizer from the SentencePiece fixture and
    # save it into the test's temporary directory for the mixin machinery to reload.
    def lowercase_ ( self ) -> Dict:
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowerCAmelCase_ : Union[str, Any] = AlbertTokenizer(__lowercase )
        tokenizer.save_pretrained(self.tmpdirname )

    # Apparent intent: get_input_output_texts — identical input/expected text pair.
    def lowercase_ ( self , __lowercase ) -> Tuple:
        lowerCAmelCase_ : Union[str, Any] = '''this is a test'''
        lowerCAmelCase_ : Tuple = '''this is a test'''
        return input_text, output_text

    # Apparent intent: token <-> id round trip for the "<pad>" token (id 0).
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Dict = '''<pad>'''
        lowerCAmelCase_ : Union[str, Any] = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )

    # Apparent intent: spot-check the vocab ordering and its size (30000 entries).
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
        self.assertEqual(len(__lowercase ) , 3_0_0_0_0 )

    # Apparent intent: vocab_size property matches the fixture model.
    def lowercase_ ( self ) -> Tuple:
        self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )

    # Apparent intent: the slow (Python) and fast (Rust) tokenizers agree on
    # tokenize() and encode(), with and without special tokens.
    def lowercase_ ( self ) -> Optional[Any]:
        if not self.test_rust_tokenizer:
            return

        lowerCAmelCase_ : int = self.get_tokenizer()
        lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()

        lowerCAmelCase_ : int = '''I was born in 92000, and this is falsé.'''

        lowerCAmelCase_ : int = tokenizer.tokenize(__lowercase )
        lowerCAmelCase_ : str = rust_tokenizer.tokenize(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )

        lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
        self.assertListEqual(__lowercase , __lowercase )

        lowerCAmelCase_ : List[str] = self.get_rust_tokenizer()
        lowerCAmelCase_ : str = tokenizer.encode(__lowercase )
        lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(__lowercase )
        self.assertListEqual(__lowercase , __lowercase )

    # Apparent intent: full tokenizer behavior, including accent handling — with
    # keep_accents the "é" is kept by tokenize() but maps to <unk> when ids are
    # converted back to tokens.
    def lowercase_ ( self ) -> Optional[int]:
        lowerCAmelCase_ : List[str] = AlbertTokenizer(__lowercase , keep_accents=__lowercase )

        lowerCAmelCase_ : Any = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__lowercase , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [4_8, 2_5, 2_1, 1_2_8_9] )

        lowerCAmelCase_ : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __lowercase , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
        lowerCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(__lowercase )
        self.assertListEqual(__lowercase , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )

        lowerCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(__lowercase )
        self.assertListEqual(
            __lowercase , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )

    # Apparent intent: build_inputs_with_special_tokens wraps single sequences as
    # [CLS] text [SEP] and pairs as [CLS] text [SEP] text_a [SEP].
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Dict = AlbertTokenizer(__lowercase )

        lowerCAmelCase_ : Optional[int] = tokenizer.encode('''sequence builders''' )
        lowerCAmelCase_ : List[Any] = tokenizer.encode('''multi-sequence build''' )

        lowerCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowercase )
        lowerCAmelCase_ : str = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    # Apparent intent: integration test pinning the exact encoding produced by the
    # published albert-base-v2 checkpoint at a fixed revision.
    @slow
    def lowercase_ ( self ) -> str:
        # fmt: off
        lowerCAmelCase_ : Optional[Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase ,
            model_name='''albert-base-v2''' ,
            revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
720
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (trial division up to sqrt(number))."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return every prime from 2 up to ``n`` (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes: cross out (zero) every multiple
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Return every prime from 2 up to ``n`` by primality-testing each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list of prime factors.

    By convention 0 and 1 factor as ``[0]`` and ``[1]`` respectively.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # floor division keeps 'quotient' an exact int (no float drift)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Goldbach's conjecture: return two primes whose sum is the even ``number`` > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German: kgV) of two positive ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                # a shared prime contributes its maximal multiplicity
                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime number, counting from ``get_prime(0) == 2``."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the primes ``p_number_1`` and ``p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and ``n``)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors (e.g. 6, 28)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return ``(numerator, denominator)`` reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! (with ``factorial(0) == 1``)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number of the sequence 1, 1, 2, 3, 5, 8, ..."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
619
0