Dataset schema (column: type, value range):

  code:                     string, lengths 81 to 54k
  code_codestyle:           int64, 0 to 721
  style_context:            string, lengths 91 to 41.9k
  style_context_codestyle:  int64, 0 to 699
  label:                    int64, 0 to 1
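Each record that follows dumps the five columns in order: code, code_codestyle, style_context, style_context_codestyle, label. Below is a minimal sketch of loading a table with this schema through the Hugging Face datasets library and reading one record; the repository id "user/code-style-pairs" is a hypothetical placeholder, since the actual dataset name is not given in this dump.

from datasets import load_dataset

# Hypothetical repository id; substitute the real dataset path.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # code sample and its style score (0-721)
print(len(row["style_context"]), row["style_context_codestyle"])  # context sample and its style score (0-699)
print(row["label"])                                               # binary label (0 or 1)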
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube."""
    # Round the cube root before cubing: comparing (n ** (1 / 3)) ** 3 == n
    # directly fails for many perfect cubes because of float rounding error.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))   # False
code_codestyle: 80
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    """Cross-attention 2D downsampling block: resnet/attention pairs, then an optional downsample."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    """Plain 2D downsampling block: resnets only, then an optional downsample."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    """Cross-attention 2D upsampling block: resnet/attention pairs over skip connections, then an optional upsample."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    """Plain 2D upsampling block: resnets over skip connections, then an optional upsample."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """Middle UNet block: a resnet, then alternating attention/resnet layers."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)

        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
style_context_codestyle: 80
label: 1
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCAmelCase_ = 'hf-internal-testing/tiny-random-bert' UpperCAmelCase_ = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert') UpperCAmelCase_ = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6' class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = cached_file(UpperCamelCase_ , UpperCamelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCamelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) ) ) with open(os.path.join(UpperCamelCase_ , """refs""" , """main""" ) ) as f: __lowerCamelCase = f.read() self.assertEqual(UpperCamelCase_ , os.path.join(UpperCamelCase_ , """snapshots""" , UpperCamelCase_ , UpperCamelCase_ ) ) self.assertTrue(os.path.isfile(UpperCamelCase_ ) ) # File is cached at the same place the second time. __lowerCamelCase = cached_file(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) # Using a specific revision to test the full commit hash. __lowerCamelCase = cached_file(UpperCamelCase_ , UpperCamelCase_ , revision="""9b8c223""" ) self.assertEqual(UpperCamelCase_ , os.path.join(UpperCamelCase_ , """snapshots""" , UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCAmelCase__ ( self: Dict ): with self.assertRaisesRegex(UpperCamelCase_ , """is not a valid model identifier""" ): __lowerCamelCase = cached_file("""tiny-random-bert""" , UpperCamelCase_ ) with self.assertRaisesRegex(UpperCamelCase_ , """is not a valid git identifier""" ): __lowerCamelCase = cached_file(UpperCamelCase_ , UpperCamelCase_ , revision="""aaaa""" ) with self.assertRaisesRegex(UpperCamelCase_ , """does not appear to have a file named""" ): __lowerCamelCase = cached_file(UpperCamelCase_ , """conf""" ) def lowerCAmelCase__ ( self: Optional[int] ): with self.assertRaisesRegex(UpperCamelCase_ , """does not appear to have a file named""" ): __lowerCamelCase = cached_file(UpperCamelCase_ , """conf""" ) with open(os.path.join(UpperCamelCase_ , """refs""" , """main""" ) ) as f: __lowerCamelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase_ , """.no_exist""" , UpperCamelCase_ , """conf""" ) ) ) __lowerCamelCase = cached_file(UpperCamelCase_ , """conf""" , _raise_exceptions_for_missing_entries=UpperCamelCase_ ) self.assertIsNone(UpperCamelCase_ ) __lowerCamelCase = cached_file(UpperCamelCase_ , """conf""" , local_files_only=UpperCamelCase_ , _raise_exceptions_for_missing_entries=UpperCamelCase_ ) self.assertIsNone(UpperCamelCase_ ) __lowerCamelCase = mock.Mock() __lowerCamelCase = 5_00 __lowerCamelCase = {} __lowerCamelCase = HTTPError __lowerCamelCase = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase_ ) as mock_head: __lowerCamelCase = cached_file(UpperCamelCase_ , """conf""" , _raise_exceptions_for_connection_errors=UpperCamelCase_ ) self.assertIsNone(UpperCamelCase_ ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase__ ( self: Dict ): self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase_ ) ) self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase_ ) ) self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase_ ) ) def lowerCAmelCase__ ( self: str ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCamelCase_ , """is not a valid model identifier""" ): get_file_from_repo("""bert-base-case""" , UpperCamelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCamelCase_ , """is not a valid git identifier""" ): get_file_from_repo("""bert-base-cased""" , UpperCamelCase_ , revision="""ahaha""" ) __lowerCamelCase = get_file_from_repo("""bert-base-cased""" , UpperCamelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. __lowerCamelCase = json.loads(open(UpperCamelCase_ , """r""" ).read() ) self.assertEqual(config["""hidden_size"""] , 7_68 ) def lowerCAmelCase__ ( self: str ): with tempfile.TemporaryDirectory() as tmp_dir: __lowerCamelCase = Path(UpperCamelCase_ ) / """a.txt""" filename.touch() self.assertEqual(get_file_from_repo(UpperCamelCase_ , """a.txt""" ) , str(UpperCamelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCamelCase_ , """b.txt""" ) )
code_codestyle: 80
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into the transformers BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
style_context_codestyle: 80
label: 1
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of ways to pick k items from n."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
code_codestyle: 80
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class lowerCamelCase__: def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_input_mask __lowerCamelCase = use_labels __lowerCamelCase = use_mc_token_ids __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = self.vocab_size - 1 def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None if self.use_mc_token_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() __lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase__ ( self: Dict ): return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def 
lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ): __lowerCamelCase = CTRLModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ ) model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ): __lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__ : int = ( { 'feature-extraction': CTRLModel, 'text-classification': CTRLForSequenceClassification, 'text-generation': CTRLLMHeadModel, 'zero-shot': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = CTRLModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def lowerCAmelCase__ ( self: Optional[int] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase__ ( self: List[Any] ): pass @slow def lowerCAmelCase__ ( self: Optional[Any] ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self: Optional[Any] ): pass @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(UpperCamelCase_ ) __lowerCamelCase = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is __lowerCamelCase = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
style_context_codestyle: 80
label: 1
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the main branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and read the contents of the artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
code_codestyle: 80
def solution(n: int = 2000000) -> int:
    """Return the sum of all primes below n, via a sieve of Eratosthenes.

    In primality_list, 0 marks a prime candidate and 1 marks a composite.
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # Mark every multiple of the prime i as composite, stepping by i.
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
style_context_codestyle: 80
label: 1
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # pytest exits with code 5 when no tests are collected; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
code_codestyle: 80
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multistep (Adams-Bashforth style) combination of past epsilons.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
style_context_codestyle: 80
label: 1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process gets one extra element so there is something to pad on the others.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
code_codestyle: 80
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk skips scripts and hidden/private directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '    '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
style_context_codestyle: 80
label: 1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Tuple = ShapEPipeline UpperCAmelCase__ : Tuple = ['prompt'] UpperCAmelCase__ : Any = ['prompt'] UpperCAmelCase__ : Union[str, Any] = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] UpperCAmelCase__ : str = False @property def lowerCAmelCase__ ( self: Dict ): return 32 @property def lowerCAmelCase__ ( self: Tuple ): return 32 @property def lowerCAmelCase__ ( self: int ): return self.time_input_dim * 4 @property def lowerCAmelCase__ ( self: List[str] ): return 8 @property def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCAmelCase__ ( self: Any ): torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(UpperCamelCase_ ) @property def lowerCAmelCase__ ( self: Dict ): torch.manual_seed(0 ) __lowerCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } __lowerCamelCase = PriorTransformer(**UpperCamelCase_ ) return model @property def lowerCAmelCase__ ( self: Optional[int] ): torch.manual_seed(0 ) __lowerCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**UpperCamelCase_ ) return model def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __lowerCamelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any]=0 ): if str(UpperCamelCase_ ).startswith("""mps""" ): __lowerCamelCase = torch.manual_seed(UpperCamelCase_ ) else: __lowerCamelCase = 
torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __lowerCamelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = """cpu""" __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**UpperCamelCase_ ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase__ ( self: Any ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch_device == """cpu""" __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**UpperCamelCase_ ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Optional[int] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) __lowerCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 ) __lowerCamelCase = pipe( """a shark""" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
code_codestyle: 80
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 80
label: 1
def is_power_of_two(number: int) -> bool:
    """Return True if number is a power of two, using the n & (n - 1) bit trick.

    Note that 0 also satisfies the trick and returns True.
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 80
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
style_context_codestyle: 80
label: 1
import numpy as np


class Cell:
    """A cell in the grid world, tracking position, parent, and A* scores."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0  # cost from the start cell
        self.h = 0  # heuristic estimate to the goal
        self.f = 0  # g + h

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the up-to-eight neighbours of a cell that lie inside the world."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from start to goal, with a squared-distance heuristic."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
code_codestyle: 80
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they violate the given direction
    (1 for ascending, 0 for descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
style_context_codestyle: 80
label: 1
from sklearn.metrics import matthews_corrcoef import datasets UpperCAmelCase_ = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n' UpperCAmelCase_ = '\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n' UpperCAmelCase_ = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowerCamelCase__( datasets.Metric): def lowerCAmelCase__ ( self: List[str] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html""" ] , ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=None ): return { "matthews_correlation": float(matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ , sample_weight=UpperCamelCase_ ) ), }
80
from ... import PretrainedConfig UpperCAmelCase_ = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP UpperCAmelCase__ : Dict = 'nezha' def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = max_relative_position __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = classifier_dropout __lowerCamelCase = use_cache
80
1
def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' __lowerCamelCase = len(A__ ) while cur > 1: # Find the maximum number in arr __lowerCamelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi __lowerCamelCase = arr[mi::-1] + arr[mi + 1 : len(A__ )] # Reverse whole list __lowerCamelCase = arr[cur - 1 :: -1] + arr[cur : len(A__ )] cur -= 1 return arr if __name__ == "__main__": UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase_ = [int(item) for item in user_input.split(',')] print(pancake_sort(unsorted))
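A de-obfuscated sketch of the pancake sort above, the same prefix-reversal idea, with illustrative names:

def pancake_sort_sketch(items: list[int]) -> list[int]:
    arr = list(items)
    for size in range(len(arr), 1, -1):
        # Locate the largest element in the still-unsorted prefix arr[:size].
        mi = arr.index(max(arr[:size]))
        # Flip it to the front, then flip the whole prefix so it lands at
        # position size - 1, its final sorted position.
        arr[: mi + 1] = arr[mi::-1]
        arr[:size] = arr[size - 1 :: -1]
    return arr

# pancake_sort_sketch([3, 1, 2]) -> [1, 2, 3]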
80
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__: def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ): if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ): if self.new_user_input: if overwrite: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' F'with: "{text}".' ) __lowerCamelCase = text else: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: __lowerCamelCase = text def lowerCAmelCase__ ( self: List[str] ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): self.generated_responses.append(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self: Union[str, Any] ): __lowerCamelCase = F'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): __lowerCamelCase = """user""" if is_user else """bot""" output += F'{name} >> {text} \n' return output @add_end_docstrings( __lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ): __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: 
__lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(UpperCamelCase_ ) return preprocess_params, forward_params, postprocess_params def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ): __lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1: return outputs[0] return outputs def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ' """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ): __lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length ) __lowerCamelCase = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:] __lowerCamelCase = model_inputs.pop("""conversation""" ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ): __lowerCamelCase = model_outputs["""output_ids"""] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , ) __lowerCamelCase = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(UpperCamelCase_ ) return conversation def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ): __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) if len(UpperCamelCase_ ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
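As context, one plausible way to drive this pipeline end to end; the checkpoint name and prompts are placeholders, and this assumes a transformers release that still ships the conversational task.

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("What's the best way to learn Python?")
conversation = chatbot(conversation)          # preprocess -> generate -> postprocess
print(conversation.generated_responses[-1])

conversation.add_user_input("Any book recommendations?")  # queued for the next call
conversation = chatbot(conversation)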
80
1
import os import sys import unittest UpperCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) UpperCAmelCase_ = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') UpperCAmelCase_ = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: str ): __lowerCamelCase = get_test_to_tester_mapping(UpperCamelCase_ ) __lowerCamelCase = get_test_to_tester_mapping(UpperCamelCase_ ) __lowerCamelCase = {"""BertModelTest""": """BertModelTester"""} __lowerCamelCase = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = get_model_to_test_mapping(UpperCamelCase_ ) __lowerCamelCase = get_model_to_test_mapping(UpperCamelCase_ ) __lowerCamelCase = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } __lowerCamelCase = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = get_model_to_tester_mapping(UpperCamelCase_ ) __lowerCamelCase = get_model_to_tester_mapping(UpperCamelCase_ ) __lowerCamelCase = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], """BertModel""": ["""BertModelTester"""], } __lowerCamelCase = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } 
self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ )
80
import math def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = 2 __lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment __lowerCamelCase = [True] * (end + 1) __lowerCamelCase = [] while start <= end: if temp[start] is True: in_prime.append(A__ ) for i in range(start * start , end + 1 , A__ ): __lowerCamelCase = False start += 1 prime += in_prime __lowerCamelCase = end + 1 __lowerCamelCase = min(2 * end , A__ ) while low <= n: __lowerCamelCase = [True] * (high - low + 1) for each in in_prime: __lowerCamelCase = math.floor(low / each ) * each if t < low: t += each for j in range(A__ , high + 1 , A__ ): __lowerCamelCase = False for j in range(len(A__ ) ): if temp[j] is True: prime.append(j + low ) __lowerCamelCase = high + 1 __lowerCamelCase = min(high + end , A__ ) return prime print(sieve(10**6))
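A compact sketch of the segmented-sieve idea the sample implements: sieve up to sqrt(n) first, then mark composites segment by segment so memory stays near O(sqrt(n)). Names are illustrative.

import math

def segmented_sieve_sketch(n: int) -> list[int]:
    limit = math.isqrt(n) + 1
    base = [True] * (limit + 1)
    base_primes = []
    for p in range(2, limit + 1):
        if base[p]:
            base_primes.append(p)
            for m in range(p * p, limit + 1, p):
                base[m] = False
    primes = [p for p in base_primes if p <= n]
    low, high = limit + 1, min(2 * limit, n)
    while low <= n:
        segment = [True] * (high - low + 1)
        for p in base_primes:
            # First multiple of p inside [low, high], but never p itself.
            start = max(p * p, (low + p - 1) // p * p)
            for m in range(start, high + 1, p):
                segment[m - low] = False
        primes.extend(i + low for i, flag in enumerate(segment) if flag)
        low, high = high + 1, min(high + limit, n)
    return primes

# segmented_sieve_sketch(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]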
80
1
from pathlib import Path import fire def lowerCamelCase__ ( A__ : str , A__ : str , A__ : int ): '''simple docstring''' __lowerCamelCase = Path(A__ ) __lowerCamelCase = Path(A__ ) dest_dir.mkdir(exist_ok=A__ ) for path in src_dir.iterdir(): __lowerCamelCase = [x.rstrip() for x in list(path.open().readlines() )][:n] __lowerCamelCase = dest_dir.joinpath(path.name ) print(A__ ) dest_path.open("""w""" ).write("""\n""".join(A__ ) ) if __name__ == "__main__": fire.Fire(minify)
80
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : int = BartphoTokenizer UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[str] = True def lowerCAmelCase__ ( self: Tuple ): super().setUp() __lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] __lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowerCamelCase = {"""unk_token""": """<unk>"""} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ) with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(F'{token} {vocab_tokens[token]}\n' ) __lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): __lowerCamelCase = """This is a là test""" __lowerCamelCase = """This is a<unk><unk> test""" return input_text, output_text def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) __lowerCamelCase = """This is a là test""" __lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split() __lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
80
1
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowerCamelCase__( datasets.Metric): def lowerCAmelCase__ ( self: int ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]=[1, 10, 1_00] , UpperCamelCase_: Any=4 , UpperCamelCase_: List[Any]=3.0 ): if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=UpperCamelCase_ ) as executor: __lowerCamelCase = [] __lowerCamelCase = Counter() __lowerCamelCase = 0 __lowerCamelCase = defaultdict(UpperCamelCase_ ) for task_id, (candidates, test_case) in enumerate(zip(UpperCamelCase_ , UpperCamelCase_ ) ): for candidate in candidates: __lowerCamelCase = candidate + """\n""" + test_case __lowerCamelCase = (test_program, timeout, task_id, completion_id[task_id]) __lowerCamelCase = executor.submit(UpperCamelCase_ , *UpperCamelCase_ ) futures.append(UpperCamelCase_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(UpperCamelCase_ ): __lowerCamelCase = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) __lowerCamelCase, __lowerCamelCase = [], [] for result in results.values(): result.sort() __lowerCamelCase = [r[1]["""passed"""] for r in result] total.append(len(UpperCamelCase_ ) ) correct.append(sum(UpperCamelCase_ ) ) __lowerCamelCase = np.array(UpperCamelCase_ ) __lowerCamelCase = np.array(UpperCamelCase_ ) __lowerCamelCase = k __lowerCamelCase = {F'pass@{k}': estimate_pass_at_k(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Any , A__ : Optional[Any] ): '''simple docstring''' def estimator(A__ : int , A__ : int , A__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(A__ , A__ ): __lowerCamelCase = itertools.repeat(A__ , len(A__ ) ) else: assert len(A__ ) == len(A__ ) __lowerCamelCase = iter(A__ ) return np.array([estimator(int(A__ ) , int(A__ ) , A__ ) for n, c in zip(A__ , A__ )] )
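A worked check of the unbiased pass@k estimator defined at the end of the sample, reproduced with readable names; the asserted numbers match the docstring example above ({'pass@1': 0.5, 'pass@2': 1.0}).

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """pass@k = 1 - C(n-c, k) / C(n, k), computed stably as a running product."""
    if n - c < k:
        return 1.0  # fewer failures than k draws: a passing sample is guaranteed
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

assert pass_at_k(n=2, c=1, k=1) == 0.5  # 1 - (1 - 1/2)
assert pass_at_k(n=2, c=1, k=2) == 1.0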
80
def lowerCamelCase__ ( A__ : dict ): '''simple docstring''' __lowerCamelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowerCamelCase = set() return any( node not in visited and depth_first_search(A__ , A__ , A__ , A__ ) for node in graph ) def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ): '''simple docstring''' visited.add(A__ ) rec_stk.add(A__ ) for node in graph[vertex]: if node not in visited: if depth_first_search(A__ , A__ , A__ , A__ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(A__ ) return False if __name__ == "__main__": from doctest import testmod testmod()
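An iterative variant of the same back-edge check, useful when recursion depth is a concern; it assumes the same adjacency-dict graph shape, and the names are illustrative.

def has_cycle_sketch(graph: dict) -> bool:
    WHITE, GRAY, BLACK = 0, 1, 2   # unvisited / on the current path / fully explored
    color = {node: WHITE for node in graph}
    for root in graph:
        if color[root] != WHITE:
            continue
        color[root] = GRAY
        stack = [(root, iter(graph[root]))]
        while stack:
            node, children = stack[-1]
            for child in children:
                if color.get(child, WHITE) == GRAY:
                    return True    # back edge to a vertex on the current path
                if color.get(child, WHITE) == WHITE:
                    color[child] = GRAY
                    stack.append((child, iter(graph.get(child, []))))
                    break
            else:
                color[node] = BLACK   # all children explored, leave the path
                stack.pop()
    return False

# has_cycle_sketch({0: [1], 1: [2], 2: [0]}) -> True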
80
1
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def lowerCamelCase__ ( A__ : str , A__ : float | Decimal , A__ : float = 10**-10 ): '''simple docstring''' __lowerCamelCase = a while True: __lowerCamelCase = Decimal(A__ ) - ( Decimal(eval(A__ ) ) / Decimal(eval(str(diff(A__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(A__ ) ) < precision: # noqa: S307 return float(A__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""") # Find root of polynomial print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""") # Find root of logarithmic function print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""") # Exponential Roots print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
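A dependency-free sketch of the same Newton-Raphson update, taking the function and its derivative as callables rather than eval()'d strings; the names and the convergence guard are illustrative.

def newton_sketch(f, df, x0: float, precision: float = 1e-10, max_iter: int = 100) -> float:
    x = x0
    for _ in range(max_iter):
        fx = f(x)
        if abs(fx) < precision:
            return x
        x -= fx / df(x)  # x_{k+1} = x_k - f(x_k) / f'(x_k)
    raise ArithmeticError("did not converge within max_iter iterations")

# Square root of 5 as the root of f(x) = x**2 - 5:
# newton_sketch(lambda x: x * x - 5, lambda x: 2 * x, 2.0)  # ~2.2360679...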
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ): '''simple docstring''' __lowerCamelCase = sorted(numsa + numsa ) __lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
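The sample sorts the concatenation, which is O((m+n) log(m+n)); since the median only needs the middle of the merged order, a linear two-pointer merge suffices once each input is sorted. A sketch with illustrative names:

def median_of_two_sketch(a: list[float], b: list[float]) -> float:
    a, b = sorted(a), sorted(b)  # no-ops when the inputs are pre-sorted
    merged: list[float] = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    merged.extend(a[i:])
    merged.extend(b[j:])
    mid, odd = divmod(len(merged), 2)
    return merged[mid] if odd else (merged[mid] + merged[mid - 1]) / 2

# median_of_two_sketch([1, 3], [2]) -> 2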
80
1
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = r'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n' class lowerCamelCase__( __lowerCamelCase): @add_start_docstrings(UpperCamelCase_ ) def __call__( self: Tuple , UpperCamelCase_: torch.LongTensor , UpperCamelCase_: torch.FloatTensor , **UpperCamelCase_: List[str] ): raise NotImplementedError("""StoppingCriteria needs to be subclassed""" ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: str , UpperCamelCase_: int , UpperCamelCase_: Optional[int] = None ): __lowerCamelCase = max_length __lowerCamelCase = max_position_embeddings @add_start_docstrings(UpperCamelCase_ ) def __call__( self: Union[str, Any] , UpperCamelCase_: torch.LongTensor , UpperCamelCase_: torch.FloatTensor , **UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = input_ids.shape[-1] __lowerCamelCase = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( """This is a friendly reminder - the current text generation call will exceed the model's predefined """ F'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe ' """exceptions, performance degradation, or nothing at all.""" ) return is_done class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int ): warnings.warn( """The class `MaxNewTokensCriteria` is deprecated. 
""" F'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ' """with `max_length = start_length + max_new_tokens` instead.""" , UpperCamelCase_ , ) __lowerCamelCase = start_length __lowerCamelCase = max_new_tokens __lowerCamelCase = start_length + max_new_tokens @add_start_docstrings(UpperCamelCase_ ) def __call__( self: List[str] , UpperCamelCase_: torch.LongTensor , UpperCamelCase_: torch.FloatTensor , **UpperCamelCase_: List[Any] ): return input_ids.shape[-1] >= self.max_length class lowerCamelCase__( __lowerCamelCase): def __init__( self: Optional[int] , UpperCamelCase_: float , UpperCamelCase_: Optional[float] = None ): __lowerCamelCase = max_time __lowerCamelCase = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(UpperCamelCase_ ) def __call__( self: Optional[Any] , UpperCamelCase_: torch.LongTensor , UpperCamelCase_: torch.FloatTensor , **UpperCamelCase_: Tuple ): return time.time() - self.initial_timestamp > self.max_time class lowerCamelCase__( __lowerCamelCase): @add_start_docstrings(UpperCamelCase_ ) def __call__( self: Dict , UpperCamelCase_: torch.LongTensor , UpperCamelCase_: torch.FloatTensor , **UpperCamelCase_: str ): return any(criteria(UpperCamelCase_ , UpperCamelCase_ ) for criteria in self ) @property def lowerCAmelCase__ ( self: str ): for stopping_criterium in self: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): return stopping_criterium.max_length elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): return stopping_criterium.max_length return None def lowerCamelCase__ ( A__ : StoppingCriteriaList , A__ : int ): '''simple docstring''' __lowerCamelCase = stopping_criteria.max_length __lowerCamelCase = deepcopy(A__ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , A__ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=A__ ) ) return new_stopping_criteria
80
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __lowerCamelCase = get_activation("""gelu""" ) self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) ) self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __lowerCamelCase = get_activation("""gelu""" ) __lowerCamelCase = get_activation("""gelu_10""" ) __lowerCamelCase = torch_builtin(UpperCamelCase_ ) __lowerCamelCase = geluaa(UpperCamelCase_ ) __lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCAmelCase__ ( self: str ): get_activation("""gelu""" ) get_activation("""gelu_10""" ) get_activation("""gelu_fast""" ) get_activation("""gelu_new""" ) get_activation("""gelu_python""" ) get_activation("""gelu_pytorch_tanh""" ) get_activation("""linear""" ) get_activation("""mish""" ) get_activation("""quick_gelu""" ) get_activation("""relu""" ) get_activation("""sigmoid""" ) get_activation("""silu""" ) get_activation("""swish""" ) get_activation("""tanh""" ) with self.assertRaises(UpperCamelCase_ ): get_activation("""bogus""" ) with self.assertRaises(UpperCamelCase_ ): get_activation(UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = get_activation("""gelu""" ) __lowerCamelCase = 1 __lowerCamelCase = get_activation("""gelu""" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = acta.a
80
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { 'configuration_longformer': [ 'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongformerConfig', 'LongformerOnnxConfig', ], 'tokenization_longformer': ['LongformerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['LongformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'LongformerForMaskedLM', 'LongformerForMultipleChoice', 'LongformerForQuestionAnswering', 'LongformerForSequenceClassification', 'LongformerForTokenClassification', 'LongformerModel', 'LongformerPreTrainedModel', 'LongformerSelfAttention', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLongformerForMaskedLM', 'TFLongformerForMultipleChoice', 'TFLongformerForQuestionAnswering', 'TFLongformerForSequenceClassification', 'TFLongformerForTokenClassification', 'TFLongformerModel', 'TFLongformerPreTrainedModel', 'TFLongformerSelfAttention', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
80
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowerCamelCase__( __lowerCamelCase): @slow @require_torch def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) __lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 1_28 __lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) __lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 ) __lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] __lowerCamelCase = outputs.attention_mask assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids ) assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCamelCase_: int ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , 
logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , ) # start training trainer.train()
80
1
UpperCAmelCase_ = 'Alexander Joslin' import operator as op from .stack import Stack def lowerCamelCase__ ( A__ : str ): '''simple docstring''' __lowerCamelCase = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub} __lowerCamelCase = Stack() __lowerCamelCase = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(A__ ) ) elif i in operators: # RULE 2 operator_stack.push(A__ ) elif i == ")": # RULE 4 __lowerCamelCase = operator_stack.peek() operator_stack.pop() __lowerCamelCase = operand_stack.peek() operand_stack.pop() __lowerCamelCase = operand_stack.peek() operand_stack.pop() __lowerCamelCase = operators[opr](A__ , A__ ) operand_stack.push(A__ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": UpperCAmelCase_ = '(5 + ((4 * 2) * (2 + 3)))' # answer = 45 print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
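A self-contained sketch of the two-stack evaluation above, using plain lists as stacks; like the sample, it assumes a fully parenthesized expression with single-digit operands.

import operator

def two_stack_eval_sketch(expression: str) -> float:
    ops = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}
    operands: list[float] = []
    pending: list[str] = []
    for token in expression:
        if token.isdigit():
            operands.append(int(token))      # rule 1: push operands
        elif token in ops:
            pending.append(token)            # rule 2: push operators
        elif token == ")":                   # rule 4: reduce one parenthesized pair
            op = pending.pop()
            right = operands.pop()
            left = operands.pop()
            operands.append(ops[op](left, right))
        # "(" and whitespace are ignored (rule 3)
    return operands[-1]

# two_stack_eval_sketch("(5 + ((4 * 2) * (2 + 3)))") -> 45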
80
class lowerCamelCase__: # Public class to implement a graph def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): __lowerCamelCase = row __lowerCamelCase = col __lowerCamelCase = graph def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): # Checking all 8 elements surrounding nth element __lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1] __lowerCamelCase = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands. __lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )] __lowerCamelCase = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += 1 return count
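A stack-based sketch of the same 8-neighbour island count, avoiding deep recursion on large grids; the names are illustrative.

def count_islands_sketch(grid: list[list[int]]) -> int:
    rows = len(grid)
    cols = len(grid[0]) if rows else 0
    seen = [[False] * cols for _ in range(rows)]
    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] and not seen[r][c]:
                count += 1                      # new island found
                stack = [(r, c)]
                seen[r][c] = True
                while stack:                    # flood-fill its 8-connected cells
                    i, j = stack.pop()
                    for di in (-1, 0, 1):
                        for dj in (-1, 0, 1):
                            ni, nj = i + di, j + dj
                            if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not seen[ni][nj]:
                                seen[ni][nj] = True
                                stack.append((ni, nj))
    return count

# count_islands_sketch([[1, 0, 0], [0, 0, 1], [0, 1, 1]]) -> 2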
80
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : str = 'convnextv2' def __init__( self: Optional[Any] , UpperCamelCase_: int=3 , UpperCamelCase_: Dict=4 , UpperCamelCase_: int=4 , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: Any=1E-12 , UpperCamelCase_: List[str]=0.0 , UpperCamelCase_: Optional[int]=2_24 , UpperCamelCase_: List[str]=None , UpperCamelCase_: Any=None , **UpperCamelCase_: int , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = num_channels __lowerCamelCase = patch_size __lowerCamelCase = num_stages __lowerCamelCase = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes __lowerCamelCase = [3, 3, 9, 3] if depths is None else depths __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = drop_path_rate __lowerCamelCase = image_size __lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] __lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
80
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase__ ( A__ : str ): '''simple docstring''' __lowerCamelCase = DPTConfig() if "large" in checkpoint_url: __lowerCamelCase = 1024 __lowerCamelCase = 4096 __lowerCamelCase = 24 __lowerCamelCase = 16 __lowerCamelCase = [5, 11, 17, 23] __lowerCamelCase = [256, 512, 1024, 1024] __lowerCamelCase = (1, 384, 384) if "ade" in checkpoint_url: __lowerCamelCase = True __lowerCamelCase = 150 __lowerCamelCase = """huggingface/label-files""" __lowerCamelCase = """ade20k-id2label.json""" __lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) ) __lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = [1, 150, 480, 480] return config, expected_shape def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' __lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): __lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: __lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: __lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" ) if "pos_embed" in name: __lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: __lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: __lowerCamelCase = name.replace("""proj""" , """projection""" ) if "blocks" in name: __lowerCamelCase = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: __lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name: __lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: __lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: __lowerCamelCase = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: __lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: __lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: __lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: __lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: __lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 __lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: __lowerCamelCase = name.replace("""out_conv""" , 
"""projection""" ) if "resConfUnit1" in name: __lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: __lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: __lowerCamelCase = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: __lowerCamelCase = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: __lowerCamelCase = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: __lowerCamelCase = name.replace("""bn""" , """batch_norm""" ) if "head" in name: __lowerCamelCase = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: __lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: __lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" ) return name def lowerCamelCase__ ( A__ : Tuple , A__ : Any ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) __lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase = in_proj_weight[: config.hidden_size, :] __lowerCamelCase = in_proj_bias[: config.hidden_size] __lowerCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase = in_proj_weight[ -config.hidden_size :, : ] __lowerCamelCase 
= in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ ) # load original state_dict from URL __lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(A__ ) # rename keys for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(A__ ) __lowerCamelCase = val # read in qkv matrices read_in_q_k_v(A__ , A__ ) # load HuggingFace model __lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ ) model.load_state_dict(A__ ) model.eval() # Check outputs on an image __lowerCamelCase = 480 if """ade""" in checkpoint_url else 384 __lowerCamelCase = DPTImageProcessor(size=A__ ) __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(A__ , return_tensors="""pt""" ) # forward pass __lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth # Assert logits __lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] ) if "ade" in checkpoint_url: __lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] ) assert outputs.shape == torch.Size(A__ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , A__ ) ) Path(A__ ).mkdir(exist_ok=A__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(A__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(A__ ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , ) image_processor.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) UpperCAmelCase_ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
80
1
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = TypeVar('DatasetType', Dataset, IterableDataset) def lowerCamelCase__ ( A__ : List[DatasetType] , A__ : Optional[List[float]] = None , A__ : Optional[int] = None , A__ : Optional[DatasetInfo] = None , A__ : Optional[NamedSplit] = None , A__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("""Unable to interleave an empty list of datasets.""" ) for i, dataset in enumerate(A__ ): if not isinstance(A__ , (Dataset, IterableDataset) ): if isinstance(A__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' """is an empty dataset dictionary.""" ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A__ )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A__ ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A__ ).__name__}.' ) if i == 0: __lowerCamelCase, __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(A__ , A__ ) else (IterableDataset, Dataset) ) elif not isinstance(A__ , A__ ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A__ , A__ , A__ , info=A__ , split=A__ , stopping_strategy=A__ ) else: return _interleave_iterable_datasets( A__ , A__ , A__ , info=A__ , split=A__ , stopping_strategy=A__ ) def lowerCamelCase__ ( A__ : List[DatasetType] , A__ : Optional[DatasetInfo] = None , A__ : Optional[NamedSplit] = None , A__ : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError("""Unable to concatenate an empty list of datasets.""" ) for i, dataset in enumerate(A__ ): if not isinstance(A__ , (Dataset, IterableDataset) ): if isinstance(A__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' """is an empty dataset dictionary.""" ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A__ )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A__ ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A__ ).__name__}.' 
) if i == 0: __lowerCamelCase, __lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(A__ , A__ ) else (IterableDataset, Dataset) ) elif not isinstance(A__ , A__ ): raise ValueError( f'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(A__ , info=A__ , split=A__ , axis=A__ ) else: return _concatenate_iterable_datasets(A__ , info=A__ , split=A__ , axis=A__ )
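A hedged usage sketch of the two helpers above via the public datasets API (assumes the datasets package is installed; the toy columns are illustrative):

from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})

# alternate rows until the first dataset is exhausted
mixed = interleave_datasets([d1, d2], stopping_strategy="first_exhausted")
# append rows (axis=0 is the default)
joined = concatenate_datasets([d1, d2])
print(mixed["x"], joined["x"])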
80
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
80
1
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = 'ResNetConfig' # Base docstring UpperCAmelCase_ = 'microsoft/resnet-50' UpperCAmelCase_ = [1, 2_048, 7, 7] # Image classification docstring UpperCAmelCase_ = 'microsoft/resnet-50' UpperCAmelCase_ = 'tiger cat' UpperCAmelCase_ = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowerCamelCase__( nn.Module): def __init__( self: Any , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int = 3 , UpperCamelCase_: int = 1 , UpperCamelCase_: str = "relu" ): super().__init__() __lowerCamelCase = nn.Convad( UpperCamelCase_ , UpperCamelCase_ , kernel_size=UpperCamelCase_ , stride=UpperCamelCase_ , padding=kernel_size // 2 , bias=UpperCamelCase_ ) __lowerCamelCase = nn.BatchNormad(UpperCamelCase_ ) __lowerCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Tensor ): __lowerCamelCase = self.convolution(UpperCamelCase_ ) __lowerCamelCase = self.normalization(UpperCamelCase_ ) __lowerCamelCase = self.activation(UpperCamelCase_ ) return hidden_state class lowerCamelCase__( nn.Module): def __init__( self: Union[str, Any] , UpperCamelCase_: ResNetConfig ): super().__init__() __lowerCamelCase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) __lowerCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) __lowerCamelCase = config.num_channels def lowerCAmelCase__ ( self: str , UpperCamelCase_: Tensor ): __lowerCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) __lowerCamelCase = self.embedder(UpperCamelCase_ ) __lowerCamelCase = self.pooler(UpperCamelCase_ ) return embedding class lowerCamelCase__( nn.Module): def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int = 2 ): super().__init__() __lowerCamelCase = nn.Convad(UpperCamelCase_ , UpperCamelCase_ , kernel_size=1 , stride=UpperCamelCase_ , bias=UpperCamelCase_ ) __lowerCamelCase = nn.BatchNormad(UpperCamelCase_ ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Tensor ): __lowerCamelCase = self.convolution(UpperCamelCase_ ) __lowerCamelCase = self.normalization(UpperCamelCase_ ) return hidden_state class lowerCamelCase__( nn.Module): def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int = 1 , UpperCamelCase_: str = "relu" ): super().__init__() __lowerCamelCase = in_channels != out_channels or stride != 1 __lowerCamelCase = ( ResNetShortCut(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ ) if should_apply_shortcut else nn.Identity() ) __lowerCamelCase 
= nn.Sequential( ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ ) , ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , activation=UpperCamelCase_ ) , ) __lowerCamelCase = ACTaFN[activation] def lowerCAmelCase__ ( self: str , UpperCamelCase_: Tuple ): __lowerCamelCase = hidden_state __lowerCamelCase = self.layer(UpperCamelCase_ ) __lowerCamelCase = self.shortcut(UpperCamelCase_ ) hidden_state += residual __lowerCamelCase = self.activation(UpperCamelCase_ ) return hidden_state class lowerCamelCase__( nn.Module): def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int = 1 , UpperCamelCase_: str = "relu" , UpperCamelCase_: int = 4 ): super().__init__() __lowerCamelCase = in_channels != out_channels or stride != 1 __lowerCamelCase = out_channels // reduction __lowerCamelCase = ( ResNetShortCut(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ ) if should_apply_shortcut else nn.Identity() ) __lowerCamelCase = nn.Sequential( ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , kernel_size=1 ) , ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ ) , ResNetConvLayer(UpperCamelCase_ , UpperCamelCase_ , kernel_size=1 , activation=UpperCamelCase_ ) , ) __lowerCamelCase = ACTaFN[activation] def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] ): __lowerCamelCase = hidden_state __lowerCamelCase = self.layer(UpperCamelCase_ ) __lowerCamelCase = self.shortcut(UpperCamelCase_ ) hidden_state += residual __lowerCamelCase = self.activation(UpperCamelCase_ ) return hidden_state class lowerCamelCase__( nn.Module): def __init__( self: Union[str, Any] , UpperCamelCase_: ResNetConfig , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int = 2 , UpperCamelCase_: int = 2 , ): super().__init__() __lowerCamelCase = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer __lowerCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(UpperCamelCase_ , UpperCamelCase_ , stride=UpperCamelCase_ , activation=config.hidden_act ) , *[layer(UpperCamelCase_ , UpperCamelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Tensor ): __lowerCamelCase = input for layer in self.layers: __lowerCamelCase = layer(UpperCamelCase_ ) return hidden_state class lowerCamelCase__( nn.Module): def __init__( self: Any , UpperCamelCase_: ResNetConfig ): super().__init__() __lowerCamelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( UpperCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __lowerCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(UpperCamelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , depth=UpperCamelCase_ ) ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Tensor , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True ): __lowerCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowerCamelCase = hidden_states + (hidden_state,) __lowerCamelCase = stage_module(UpperCamelCase_ ) if output_hidden_states: __lowerCamelCase = hidden_states + (hidden_state,) if not 
return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=UpperCamelCase_ , hidden_states=UpperCamelCase_ , ) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : str = ResNetConfig UpperCAmelCase__ : Any = 'resnet' UpperCAmelCase__ : str = 'pixel_values' UpperCAmelCase__ : Optional[Any] = True def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Any ): if isinstance(UpperCamelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" ) elif isinstance(UpperCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=False ): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = value UpperCAmelCase_ = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' UpperCAmelCase_ = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare ResNet model outputting raw features without any specific head on top.' 
, __lowerCamelCase , ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: int , UpperCamelCase_: Optional[Any] ): super().__init__(UpperCamelCase_ ) __lowerCamelCase = config __lowerCamelCase = ResNetEmbeddings(UpperCamelCase_ ) __lowerCamelCase = ResNetEncoder(UpperCamelCase_ ) __lowerCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tensor , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[bool] = None ): __lowerCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase = self.embedder(UpperCamelCase_ ) __lowerCamelCase = self.encoder( UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ ) __lowerCamelCase = encoder_outputs[0] __lowerCamelCase = self.pooler(UpperCamelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCamelCase_ , pooler_output=UpperCamelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __lowerCamelCase , ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: str , UpperCamelCase_: int ): super().__init__(UpperCamelCase_ ) __lowerCamelCase = config.num_labels __lowerCamelCase = ResNetModel(UpperCamelCase_ ) # classification head __lowerCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[torch.LongTensor] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[bool] = None , ): __lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase = self.resnet(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ ) __lowerCamelCase = outputs.pooler_output if return_dict else outputs[1] __lowerCamelCase = self.classifier(UpperCamelCase_ ) __lowerCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowerCamelCase = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowerCamelCase = """single_label_classification""" else: __lowerCamelCase = """multi_label_classification""" if self.config.problem_type == "regression": __lowerCamelCase = MSELoss() if self.num_labels == 1: __lowerCamelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowerCamelCase = loss_fct(UpperCamelCase_ , UpperCamelCase_ ) elif 
self.config.problem_type == "single_label_classification": __lowerCamelCase = CrossEntropyLoss() __lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowerCamelCase = BCEWithLogitsLoss() __lowerCamelCase = loss_fct(UpperCamelCase_ , UpperCamelCase_ ) if not return_dict: __lowerCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase_ , logits=UpperCamelCase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( '\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __lowerCamelCase , ) class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): def __init__( self: Dict , UpperCamelCase_: Optional[Any] ): super().__init__(UpperCamelCase_ ) super()._init_backbone(UpperCamelCase_ ) __lowerCamelCase = [config.embedding_size] + config.hidden_sizes __lowerCamelCase = ResNetEmbeddings(UpperCamelCase_ ) __lowerCamelCase = ResNetEncoder(UpperCamelCase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) @replace_return_docstrings(output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tensor , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[bool] = None ): __lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase = self.embedder(UpperCamelCase_ ) __lowerCamelCase = self.encoder(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ ) __lowerCamelCase = outputs.hidden_states __lowerCamelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: __lowerCamelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=UpperCamelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=UpperCamelCase_ , )
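A short sketch of driving the classification model above through the public transformers classes; the tiny config and the random pixel tensor are illustrative, not a real checkpoint:

import torch
from transformers import ResNetConfig, ResNetForImageClassification

config = ResNetConfig(num_labels=10)  # illustrative config
model = ResNetForImageClassification(config).eval()

pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for a processed image
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(logits.shape)  # torch.Size([1, 10])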
80
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = 'bert' def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True 
, UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ): super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout class lowerCamelCase__( __lowerCamelCase): @property def lowerCAmelCase__ ( self: Any ): if self.task == "multiple-choice": __lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __lowerCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
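A minimal sketch of instantiating the configuration above through the public BertConfig class; the hyperparameters are illustrative:

from transformers import BertConfig, BertModel

config = BertConfig(
    hidden_size=128,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=512,
)
model = BertModel(config)
print(model.config.vocab_size)  # 30522, the vocab_size default above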
80
1
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def lowerCamelCase__ ( A__ : Tuple , A__ : str , A__ : Any , A__ : Optional[int] ): '''simple docstring''' __lowerCamelCase = s.rsplit(A__ , A__ ) return new.join(A__ ) def lowerCamelCase__ ( A__ : str ): '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def lowerCamelCase__ ( A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __lowerCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' ) if "res_path" in key: __lowerCamelCase = key.replace("""res_path.""" , """res_path.path.""" ) if key.endswith(""".w""" ): __lowerCamelCase = rreplace(A__ , """.w""" , """.weight""" , 1 ) if key.endswith(""".b""" ): __lowerCamelCase = rreplace(A__ , """.b""" , """.bias""" , 1 ) __lowerCamelCase = value.float() return upgrade @torch.no_grad() def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : int=None , A__ : Optional[Any]=True ): '''simple docstring''' from dall_e import Encoder __lowerCamelCase = Encoder() if os.path.exists(A__ ): __lowerCamelCase = torch.load(A__ ) else: __lowerCamelCase = torch.hub.load_state_dict_from_url(A__ ) if isinstance(A__ , A__ ): __lowerCamelCase = ckpt.state_dict() encoder.load_state_dict(A__ ) if config_path is not None: __lowerCamelCase = FlavaImageCodebookConfig.from_pretrained(A__ ) else: __lowerCamelCase = FlavaImageCodebookConfig() __lowerCamelCase = FlavaImageCodebook(A__ ).eval() __lowerCamelCase = encoder.state_dict() __lowerCamelCase = upgrade_state_dict(A__ ) hf_model.load_state_dict(A__ ) __lowerCamelCase = hf_model.state_dict() __lowerCamelCase = count_parameters(A__ ) __lowerCamelCase = count_parameters(A__ ) assert torch.allclose(A__ , A__ , atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(A__ ) else: return hf_state_dict if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase_ = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
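A sketch of the right-to-left replace helper the upgrade above relies on, rewritten with descriptive names for clarity (the example key is illustrative):

def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    # split from the right at most `occurrence` times, then rejoin with `new`
    parts = s.rsplit(old, occurrence)
    return new.join(parts)

assert rreplace("blocks.0.res_path.0.w", ".w", ".weight", 1) == "blocks.0.res_path.0.weight"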
80
from __future__ import annotations from math import ceil, floor, sqrt def lowerCamelCase__ ( A__ : int = 2000000 ): '''simple docstring''' __lowerCamelCase = [0] __lowerCamelCase = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target __lowerCamelCase = 0 # the area corresponding to the grid that gives the product closest to target __lowerCamelCase = 0 # an estimate of b, using the quadratic formula __lowerCamelCase = 42 # the largest integer less than b_estimate __lowerCamelCase = 42 # the smallest integer greater than b_estimate __lowerCamelCase = 42 # the triangle number corresponding to b_floor __lowerCamelCase = 42 # the triangle number corresponding to b_ceil __lowerCamelCase = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): __lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 __lowerCamelCase = floor(A__ ) __lowerCamelCase = ceil(A__ ) __lowerCamelCase = triangle_numbers[b_floor] __lowerCamelCase = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): __lowerCamelCase = triangle_b_first_guess * triangle_a __lowerCamelCase = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): __lowerCamelCase = triangle_b_second_guess * triangle_a __lowerCamelCase = idx_a * b_ceil return area if __name__ == "__main__": print(f"""{solution() = }""")
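A brute-force cross-check of the identity this solution exploits: an a x b grid contains T(a) * T(b) axis-aligned sub-rectangles, where T(n) = n(n+1)/2 is the n-th triangle number:

def count_rectangles(a: int, b: int) -> int:
    # choose two distinct vertical and two distinct horizontal grid lines
    return sum(
        1
        for x1 in range(a)
        for x2 in range(x1 + 1, a + 1)
        for y1 in range(b)
        for y2 in range(y1 + 1, b + 1)
    )

triangle = lambda n: n * (n + 1) // 2
assert count_rectangles(3, 2) == triangle(3) * triangle(2) == 18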
80
1
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar UpperCAmelCase_ = TypeVar('T') class lowerCamelCase__( Generic[T]): def __init__( self: Dict , UpperCamelCase_: T ): __lowerCamelCase = data __lowerCamelCase = None def __str__( self: Optional[int] ): return F'{self.data}' class lowerCamelCase__( Generic[T]): def __init__( self: int ): __lowerCamelCase = None def __iter__( self: Tuple ): __lowerCamelCase = self.top while node: yield node.data __lowerCamelCase = node.next def __str__( self: List[Any] ): return "->".join([str(UpperCamelCase_ ) for item in self] ) def __len__( self: str ): return len(tuple(iter(self ) ) ) def lowerCAmelCase__ ( self: List[str] ): return self.top is None def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: T ): __lowerCamelCase = Node(UpperCamelCase_ ) if not self.is_empty(): __lowerCamelCase = self.top __lowerCamelCase = node def lowerCAmelCase__ ( self: str ): if self.is_empty(): raise IndexError("""pop from empty stack""" ) assert isinstance(self.top , UpperCamelCase_ ) __lowerCamelCase = self.top __lowerCamelCase = self.top.next return pop_node.data def lowerCAmelCase__ ( self: Tuple ): if self.is_empty(): raise IndexError("""peek from empty stack""" ) assert self.top is not None return self.top.data def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = None if __name__ == "__main__": from doctest import testmod testmod()
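A quick usage sketch, assuming the generic stack class above is bound to its conventional names (Stack with push/pop/peek, which the obfuscated identifiers stand in for; these bindings are hypothetical):

stack = Stack()  # hypothetical binding for the linked-list stack above
for item in (1, 2, 3):
    stack.push(item)

assert str(stack) == "3->2->1"  # iteration starts at the top
assert stack.pop() == 3
assert stack.peek() == 2
assert len(stack) == 2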
80
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet, attn in zip(self.resnets , self.attentions ): __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet in self.resnets: __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def 
lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ): for resnet in self.resnets: # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: int ): # there is always at least one resnet __lowerCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , 
out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __lowerCamelCase = [] for _ in range(self.num_layers ): __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ): __lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) return hidden_states
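A sketch of the skip-connection bookkeeping the up blocks above perform: pop the matching encoder feature map and concatenate it on the channel axis before the resnet (shapes are illustrative):

import jax.numpy as jnp

hidden_states = jnp.ones((1, 8, 8, 64))
res_hidden_states_tuple = (jnp.ones((1, 8, 8, 32)), jnp.ones((1, 8, 8, 64)))

res_hidden_states = res_hidden_states_tuple[-1]       # pop the last skip
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(hidden_states.shape)  # (1, 8, 8, 128)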
80
1
import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel UpperCAmelCase_ = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCamelCase__ ( A__ : List[Any] , A__ : Optional[Any]=False ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = create_model( """HTSAT-tiny""" , """roberta""" , A__ , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=A__ , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = R""".*sequential.(\d+).*""" __lowerCamelCase = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __lowerCamelCase = key.replace(A__ , A__ ) if re.match(A__ , A__ ): # replace sequential layers with list __lowerCamelCase = re.match(A__ , A__ ).group(1 ) __lowerCamelCase = key.replace(f'sequential.{sequential_layer}.' , f'layers.{int(A__ )//3}.linear.' ) elif re.match(A__ , A__ ): __lowerCamelCase = int(re.match(A__ , A__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... __lowerCamelCase = 1 if projecton_layer == 0 else 2 __lowerCamelCase = key.replace(f'_projection.{projecton_layer}.' , f'_projection.linear{transformers_projection_layer}.' ) if "audio" in key and "qkv" in key: # split qkv into query key and value __lowerCamelCase = value __lowerCamelCase = mixed_qkv.size(0 ) // 3 __lowerCamelCase = mixed_qkv[:qkv_dim] __lowerCamelCase = mixed_qkv[qkv_dim : qkv_dim * 2] __lowerCamelCase = mixed_qkv[qkv_dim * 2 :] __lowerCamelCase = query_layer __lowerCamelCase = key_layer __lowerCamelCase = value_layer else: __lowerCamelCase = value return model_state_dict def lowerCamelCase__ ( A__ : Tuple , A__ : str , A__ : Union[str, Any] , A__ : Union[str, Any]=False ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = init_clap(A__ , enable_fusion=A__ ) clap_model.eval() __lowerCamelCase = clap_model.state_dict() __lowerCamelCase = rename_state_dict(A__ ) __lowerCamelCase = ClapConfig() __lowerCamelCase = enable_fusion __lowerCamelCase = ClapModel(A__ ) # ignore the spectrogram embedding layer model.load_state_dict(A__ , strict=A__ ) model.save_pretrained(A__ ) transformers_config.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') UpperCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
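A sketch of the sequential-index rewrite above: every third fairseq-style index maps to one transformers layer (the example key is illustrative):

import re

key = "text_projection.sequential.3.weight"
match = re.match(r".*sequential.(\d+).*", key)
sequential_layer = match.group(1)
new_key = key.replace(
    f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear."
)
print(new_key)  # text_projection.layers.1.linear.weight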
80
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = ' Hello world! cécé herlolip' UpperCAmelCase_ = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = dct.pop(A__ ) __lowerCamelCase = val def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' __lowerCamelCase = torch.load(A__ , map_location="""cpu""" ) __lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval() hub_interface.model.load_state_dict(sd["""model"""] ) return hub_interface def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ ) __lowerCamelCase = emb.weight.data return lin_layer @torch.no_grad() def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ): '''simple docstring''' if not os.path.exists(A__ ): __lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval() else: __lowerCamelCase = load_xsum_checkpoint(A__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: __lowerCamelCase = checkpoint_path.replace(""".""" , """-""" ) __lowerCamelCase = BartConfig.from_pretrained(A__ ) __lowerCamelCase = bart.encode(A__ ).unsqueeze(0 ) __lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 ) if not torch.eq(A__ , A__ ).all(): raise ValueError( f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' ) if checkpoint_path == "bart.large.mnli": __lowerCamelCase = bart.state_dict() remove_ignore_keys_(A__ ) __lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""] for src, dest in mnli_rename_keys: rename_key(A__ , A__ , A__ ) __lowerCamelCase = BartForSequenceClassification(A__ ).eval() model.load_state_dict(A__ ) __lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ ) __lowerCamelCase = model(A__ )[0] # logits else: # no classification heads to worry about __lowerCamelCase = bart.model.state_dict() remove_ignore_keys_(A__ ) __lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""] __lowerCamelCase = bart.extract_features(A__ ) if hf_checkpoint_name == "facebook/bart-large": __lowerCamelCase = BartModel(A__ 
).eval() model.load_state_dict(A__ ) __lowerCamelCase = model(A__ ).model[0] else: __lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt model.model.load_state_dict(A__ ) if hasattr(A__ , """lm_head""" ): __lowerCamelCase = make_linear_from_emb(model.model.shared ) __lowerCamelCase = model.model(A__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) UpperCAmelCase_ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
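A sketch of the embedding-to-lm_head tying done by the make_linear_from_emb helper above (dimensions illustrative); nn.Linear stores its weight as (out_features, in_features), which matches the embedding's (vocab_size, emb_size) exactly:

import torch
from torch import nn

emb = nn.Embedding(100, 16)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # share the embedding matrix

assert torch.equal(lm_head.weight, emb.weight)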
80
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = 'bert' def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True 
, UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ): super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout class lowerCamelCase__( __lowerCamelCase): @property def lowerCAmelCase__ ( self: Any ): if self.task == "multiple-choice": __lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __lowerCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
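A sketch of what the ONNX input mapping above evaluates to for a plain (non-multiple-choice) task:

from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [
        ("input_ids", dynamic_axis),
        ("attention_mask", dynamic_axis),
        ("token_type_ids", dynamic_axis),
    ]
)
print(list(onnx_inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']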
80
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class lowerCamelCase__: def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_input_mask __lowerCamelCase = use_labels __lowerCamelCase = use_mc_token_ids __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = self.vocab_size - 1 def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None if self.use_mc_token_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() __lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase__ ( self: Dict ): return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def 
lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ): __lowerCamelCase = CTRLModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ ) model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ): __lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__ : int = ( { 'feature-extraction': CTRLModel, 'text-classification': CTRLForSequenceClassification, 'text-generation': CTRLLMHeadModel, 'zero-shot': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = CTRLModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def lowerCAmelCase__ ( self: Optional[int] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase__ ( self: List[Any] ): pass @slow def lowerCAmelCase__ ( self: Optional[Any] ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self: Optional[Any] ): pass @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(UpperCamelCase_ ) __lowerCamelCase = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is __lowerCamelCase = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
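A sketch of the random-input pattern the model tester leans on, as a simplified stand-in for the ids_tensor / random_attention_mask test utilities imported above:

import torch

def ids_tensor(shape, vocab_size):
    # uniform random token ids, the shape the model tester feeds everywhere
    return torch.randint(0, vocab_size, shape, dtype=torch.long)

input_ids = ids_tensor((14, 7), 99)  # (batch_size, seq_length) from the tester defaults
attention_mask = torch.ones_like(input_ids)
print(input_ids.shape, attention_mask.shape)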
80
1
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever UpperCAmelCase_ = logging.getLogger(__name__) class lowerCamelCase__( __lowerCamelCase): def __init__( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: Dict=None ): super().__init__( UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , ) __lowerCamelCase = None def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int ): logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually __lowerCamelCase = self._infer_socket_ifname() # avoid clash with the NCCL port __lowerCamelCase = str(distributed_port + 1 ) __lowerCamelCase = dist.new_group(ranks=UpperCamelCase_ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait until the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowerCAmelCase__ ( self: Tuple ): return dist.get_rank(group=self.process_group ) == 0 def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str]=torch.floataa ): __lowerCamelCase = torch.empty(UpperCamelCase_ , dtype=UpperCamelCase_ ) dist.scatter(UpperCamelCase_ , src=0 , scatter_list=UpperCamelCase_ , group=self.process_group ) return target_tensor def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __lowerCamelCase = next((addr for addr in addrs if addr.startswith("""e""" )) , UpperCamelCase_ ) return ifname def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int ): # single GPU training if not dist.is_initialized(): __lowerCamelCase, __lowerCamelCase = self._main_retrieve(UpperCamelCase_ , UpperCamelCase_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase_ ) # distributed training __lowerCamelCase = dist.get_world_size(group=self.process_group ) # gather logic __lowerCamelCase = None if self._is_main(): __lowerCamelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCamelCase_ )] dist.gather(torch.tensor(UpperCamelCase_ ) , dst=0 , gather_list=UpperCamelCase_ , group=self.process_group ) # scatter logic __lowerCamelCase = question_hidden_states.shape[0] __lowerCamelCase = [] __lowerCamelCase = [] if self._is_main(): assert len(UpperCamelCase_ ) == world_size __lowerCamelCase, __lowerCamelCase = self._main_retrieve(torch.cat(UpperCamelCase_ ).numpy() , UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase = torch.tensor(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ ) __lowerCamelCase = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = self._scattered(UpperCamelCase_ , [n_queries, n_docs] ,
target_type=torch.intaa ) __lowerCamelCase = self._scattered(UpperCamelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCamelCase_ )
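# On the main worker, the retrieve() method above gathers query embeddings from
# every rank, runs a single index lookup, and splits the results back into
# per-rank chunks before scattering. A plausible sketch of that chunking step
# (the library's actual `_chunk_tensor` may differ):
import torch

def chunk_tensor(t: torch.Tensor, num_chunks: int):
    # Split along the batch dimension into `num_chunks` contiguous pieces;
    # assumes every rank contributed an equally sized batch, as dist.gather requires.
    chunk_size = t.shape[0] // num_chunks
    return [t[i * chunk_size : (i + 1) * chunk_size] for i in range(num_chunks)]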
80
def lowerCamelCase__ ( A__ : int = 2000000 ): '''simple docstring''' __lowerCamelCase = [0 for i in range(n + 1 )] __lowerCamelCase = 1 __lowerCamelCase = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , A__ ): __lowerCamelCase = 1 __lowerCamelCase = 0 for i in range(A__ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f"""{solution() = }""")
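# A readable, self-contained sketch of the same sieve-of-Eratosthenes prime
# summation (names are illustrative; note the inner loop strides by the prime
# `i` so every multiple of `i` gets marked composite):
def sum_primes_below(n: int = 2_000_000) -> int:
    is_composite = [False] * (n + 1)
    total = 0
    for i in range(2, n):
        if not is_composite[i]:
            total += i
            for j in range(i * i, n + 1, i):
                is_composite[j] = True
    return total

# sum_primes_below(10) == 2 + 3 + 5 + 7 == 17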
80
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = 'deta' UpperCAmelCase__ : Union[str, Any] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self: Optional[Any] , UpperCamelCase_: Dict=None , UpperCamelCase_: List[str]=9_00 , UpperCamelCase_: Union[str, Any]=20_48 , UpperCamelCase_: int=6 , UpperCamelCase_: Union[str, Any]=20_48 , UpperCamelCase_: List[str]=8 , UpperCamelCase_: Optional[int]=6 , UpperCamelCase_: Optional[Any]=10_24 , UpperCamelCase_: int=8 , UpperCamelCase_: List[str]=0.0 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Dict="relu" , UpperCamelCase_: Optional[int]=2_56 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: List[str]=1.0 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Tuple=False , UpperCamelCase_: Union[str, Any]="sine" , UpperCamelCase_: Optional[int]=5 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: str=3_00 , UpperCamelCase_: str=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=1 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Dict=2 , UpperCamelCase_: Any=1 , UpperCamelCase_: Dict=1 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: str=0.25 , **UpperCamelCase_: Tuple , ): if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) __lowerCamelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = backbone_config.pop("""model_type""" ) __lowerCamelCase = CONFIG_MAPPING[backbone_model_type] __lowerCamelCase = config_class.from_dict(UpperCamelCase_ ) __lowerCamelCase = backbone_config __lowerCamelCase = num_queries __lowerCamelCase = max_position_embeddings __lowerCamelCase = d_model __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = encoder_layers __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = init_xavier_std __lowerCamelCase = encoder_layerdrop __lowerCamelCase = auxiliary_loss __lowerCamelCase = position_embedding_type # deformable attributes __lowerCamelCase = num_feature_levels __lowerCamelCase = encoder_n_points __lowerCamelCase = decoder_n_points __lowerCamelCase = two_stage __lowerCamelCase = two_stage_num_proposals __lowerCamelCase = with_box_refine __lowerCamelCase = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __lowerCamelCase = class_cost __lowerCamelCase = bbox_cost __lowerCamelCase = giou_cost # Loss coefficients __lowerCamelCase = mask_loss_coefficient __lowerCamelCase = dice_loss_coefficient __lowerCamelCase = bbox_loss_coefficient __lowerCamelCase = giou_loss_coefficient __lowerCamelCase = eos_coefficient __lowerCamelCase = focal_alpha super().__init__(is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ ) @property def lowerCAmelCase__ ( self: List[str] ): return self.encoder_attention_heads @property def lowerCAmelCase__ ( self: Union[str, Any] ): return self.d_model def lowerCAmelCase__ ( self: int ): __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.backbone_config.to_dict() __lowerCamelCase = self.__class__.model_type return output
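# Hedged usage sketch for the config defined above; the import assumes the
# standard transformers layout for this model (illustrative only):
from transformers import DetaConfig

config = DetaConfig(num_queries=300, encoder_layers=6)
# `num_attention_heads` is mapped onto `encoder_attention_heads` via the
# attribute_map declared on the class.
print(config.num_attention_heads)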
80
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : Dict = 1 @register_to_config def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ): # set `betas`, `alphas`, `timesteps` self.set_timesteps(UpperCamelCase_ ) # standard deviation of the initial noise distribution __lowerCamelCase = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. __lowerCamelCase = 4 # running values __lowerCamelCase = [] def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ): __lowerCamelCase = num_inference_steps __lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] __lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: __lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: __lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2 __lowerCamelCase = (1.0 - self.betas**2) ** 0.5 __lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] __lowerCamelCase = timesteps.to(UpperCamelCase_ ) __lowerCamelCase = [] def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ): if self.num_inference_steps is None: raise ValueError( """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" ) __lowerCamelCase = (self.timesteps == timestep).nonzero().item() __lowerCamelCase = timestep_index + 1 __lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(UpperCamelCase_ ) if len(self.ets ) == 1: __lowerCamelCase = self.ets[-1] elif len(self.ets ) == 2: __lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: __lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: __lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) __lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ): return sample def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ): __lowerCamelCase = self.alphas[timestep_index] __lowerCamelCase = self.betas[timestep_index] __lowerCamelCase = self.alphas[prev_timestep_index] __lowerCamelCase = self.betas[prev_timestep_index] __lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 ) __lowerCamelCase = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[Any] ): return self.config.num_train_timesteps
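# Numeric sketch of the 4th-order Adams-Bashforth-style blending used in
# `step` above once four model outputs have been buffered (illustrative values):
ets = [1.0, 2.0, 3.0, 4.0]  # pretend model outputs, oldest first
blended = (1 / 24) * (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4])
print(blended)  # 4.5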
80
1
def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' if collection == []: return [] # get some information about the collection __lowerCamelCase = len(A__ ) __lowerCamelCase = max(A__ ) __lowerCamelCase = min(A__ ) # create the counting array __lowerCamelCase = coll_max + 1 - coll_min __lowerCamelCase = [0] * counting_arr_length # count how many times a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with its predecessors. now, counting_arr[i] tells # us how many elements <= i are in the collection for i in range(1 , A__ ): __lowerCamelCase = counting_arr[i] + counting_arr[i - 1] # create the output collection __lowerCamelCase = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to beginning, updating counting_arr for i in reversed(range(0 , A__ ) ): __lowerCamelCase = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' return "".join([chr(A__ ) for i in counting_sort([ord(A__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt" UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase_ = [int(item) for item in user_input.split(',')] print(counting_sort(unsorted))
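# Worked illustration of the prefix-sum trick in the counting sort above:
# counts [1, 0, 2, 1] become cumulative [1, 1, 3, 4], so each value's cumulative
# count (minus one) is its last stable position in the output.
counts = [1, 0, 2, 1]
for i in range(1, len(counts)):
    counts[i] += counts[i - 1]
print(counts)  # [1, 1, 3, 4]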
80
import os from collections.abc import Iterator def lowerCamelCase__ ( A__ : str = "." ): '''simple docstring''' for dir_path, dir_names, filenames in os.walk(A__ ): __lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(A__ )[1] in (".py", ".ipynb"): yield os.path.join(A__ , A__ ).lstrip("""./""" ) def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' return f'{i * " "}*' if i else "\n##" def lowerCamelCase__ ( A__ : str , A__ : str ): '''simple docstring''' __lowerCamelCase = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part: print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' ) return new_path def lowerCamelCase__ ( A__ : str = "." ): '''simple docstring''' __lowerCamelCase = """""" for filepath in sorted(good_file_paths(A__ ) ): __lowerCamelCase, __lowerCamelCase = os.path.split(A__ ) if filepath != old_path: __lowerCamelCase = print_path(A__ , A__ ) __lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0 __lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" ) __lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0] print(f'{md_prefix(A__ )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('.')
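# Illustrative output of print_directory_md for a hypothetical two-file tree
# (paths are made up; note spaces in URLs become %20 and names are title-cased):
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)
#     * [Quick Sort](sorts/quick_sort.py)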
80
1
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor UpperCAmelCase_ = random.Random() def lowerCamelCase__ ( A__ : List[Any] , A__ : Dict=1.0 , A__ : List[Any]=None , A__ : Optional[int]=None ): '''simple docstring''' if rng is None: __lowerCamelCase = global_rng __lowerCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class lowerCamelCase__( unittest.TestCase): def __init__( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Optional[int]=7 , UpperCamelCase_: Dict=4_00 , UpperCamelCase_: Dict=20_00 , UpperCamelCase_: str=24 , UpperCamelCase_: Any=24 , UpperCamelCase_: Optional[Any]=0.0 , UpperCamelCase_: Optional[int]=1_60_00 , UpperCamelCase_: Any=True , UpperCamelCase_: Tuple=True , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = min_seq_length __lowerCamelCase = max_seq_length __lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCamelCase = feature_size __lowerCamelCase = num_mel_bins __lowerCamelCase = padding_value __lowerCamelCase = sampling_rate __lowerCamelCase = return_attention_mask __lowerCamelCase = do_normalize def lowerCAmelCase__ ( self: Optional[Any] ): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: Optional[Any]=False ): def _flatten(UpperCamelCase_: Any ): return list(itertools.chain(*UpperCamelCase_ ) ) if equal_length: __lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCamelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCamelCase = [np.asarray(UpperCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = SpeechaTextFeatureExtractor if is_speech_available() else None def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = SpeechaTextFeatureExtractionTester(self ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Dict ): self.assertTrue(np.all(np.mean(UpperCamelCase_ , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(UpperCamelCase_ , axis=0 ) - 1 ) < 1E-3 ) ) def lowerCAmelCase__ ( self: Dict ): # Tests that all call wrap to encode_plus and batch_encode_plus __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCamelCase = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs] # Test feature size __lowerCamelCase = feature_extractor(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="""np""" ).input_features 
self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input __lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features __lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) # Test batched __lowerCamelCase = feature_extractor(UpperCamelCase_ , return_tensors="""np""" ).input_features __lowerCamelCase = feature_extractor(UpperCamelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. __lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __lowerCamelCase = np.asarray(UpperCamelCase_ ) __lowerCamelCase = feature_extractor(UpperCamelCase_ , return_tensors="""np""" ).input_features __lowerCamelCase = feature_extractor(UpperCamelCase_ , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""] __lowerCamelCase = [None, 16, None] for max_length, padding in zip(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = feature_extractor( UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ ) __lowerCamelCase = inputs.input_features __lowerCamelCase = inputs.attention_mask __lowerCamelCase = [np.sum(UpperCamelCase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""] __lowerCamelCase = [None, 16, None] for max_length, padding in zip(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = feature_extractor( UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="""np""" , return_attention_mask=UpperCamelCase_ ) __lowerCamelCase = inputs.input_features __lowerCamelCase = inputs.attention_mask __lowerCamelCase = [np.sum(UpperCamelCase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCamelCase = [floats_list((1, x) )[0] for x in 
range(8_00 , 14_00 , 2_00 )] __lowerCamelCase = feature_extractor( UpperCamelCase_ , padding="""max_length""" , max_length=4 , truncation=UpperCamelCase_ , return_tensors="""np""" , return_attention_mask=UpperCamelCase_ , ) __lowerCamelCase = inputs.input_features __lowerCamelCase = inputs.attention_mask __lowerCamelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCamelCase = feature_extractor( UpperCamelCase_ , padding="""longest""" , max_length=4 , truncation=UpperCamelCase_ , return_tensors="""np""" , return_attention_mask=UpperCamelCase_ , ) __lowerCamelCase = inputs.input_features __lowerCamelCase = inputs.attention_mask __lowerCamelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) __lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCamelCase = feature_extractor( UpperCamelCase_ , padding="""longest""" , max_length=16 , truncation=UpperCamelCase_ , return_tensors="""np""" , return_attention_mask=UpperCamelCase_ , ) __lowerCamelCase = inputs.input_features __lowerCamelCase = inputs.attention_mask __lowerCamelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def lowerCAmelCase__ ( self: Any ): import torch __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCamelCase = np.random.rand(1_00 , 32 ).astype(np.floataa ) __lowerCamelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCamelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __lowerCamelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ): from datasets import load_dataset __lowerCamelCase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech __lowerCamelCase = ds.sort("""id""" ).select(range(UpperCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCAmelCase__ ( self: Optional[Any] ): # fmt: off __lowerCamelCase = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, 
-1.2275, -1.5443, -1.4625, ] ) # fmt: on __lowerCamelCase = self._load_datasamples(1 ) __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCamelCase = feature_extractor(UpperCamelCase_ , return_tensors="""pt""" ).input_features self.assertEquals(input_features.shape , (1, 5_84, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , UpperCamelCase_ , atol=1E-4 ) )
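# Minimal numpy sketch of the zero-mean / unit-variance property these tests
# assert for each utterance (illustrative; the epsilon value is an assumption):
import numpy as np

feats = np.random.randn(100, 24) * 3.0 + 1.0
norm = (feats - feats.mean(axis=0)) / np.sqrt(feats.var(axis=0) + 1e-7)
assert np.all(np.abs(norm.mean(axis=0)) < 1e-3)
assert np.all(np.abs(norm.var(axis=0) - 1) < 1e-3)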
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list ): '''simple docstring''' if not nums: raise ValueError("""List is empty""" ) return sum(A__ ) / len(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
80
1
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet, attn in zip(self.resnets , self.attentions ): __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet in self.resnets: __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def 
lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ): for resnet in self.resnets: # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: int ): # there is always at least one resnet __lowerCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , 
out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __lowerCamelCase = [] for _ in range(self.num_layers ): __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ): __lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) return hidden_states
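# Shape sketch of the skip-connection concatenation in the up blocks above:
# the decoder activation and the stored encoder activation are joined on the
# channel axis before entering the resnet (illustrative NHWC shapes):
import jax.numpy as jnp

hidden = jnp.zeros((1, 8, 8, 64))  # decoder activations
skip = jnp.zeros((1, 8, 8, 32))    # matching down-block output
print(jnp.concatenate((hidden, skip), axis=-1).shape)  # (1, 8, 8, 96)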
80
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : Any = 'maskformer-swin' UpperCAmelCase__ : List[Any] = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = embed_dim __lowerCamelCase = depths __lowerCamelCase = len(UpperCamelCase_ ) __lowerCamelCase = num_heads __lowerCamelCase = window_size __lowerCamelCase = mlp_ratio __lowerCamelCase = qkv_bias __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = drop_path_rate __lowerCamelCase = hidden_act __lowerCamelCase = use_absolute_embeddings __lowerCamelCase = layer_norm_eps __lowerCamelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) ) __lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )] __lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
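# Quick check of the derived `hidden_size` above: with embed_dim=96 and four
# stages (depths=[2, 2, 6, 2]), the channel dimension after the last stage is
# 96 * 2 ** (4 - 1) = 768.
print(96 * 2 ** (4 - 1))  # 768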
80
1
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(__lowerCamelCase) class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[Any] , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: Dict ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Dict=None , UpperCamelCase_: Tuple=None ): __lowerCamelCase = {} __lowerCamelCase = {} if prompt is not None: __lowerCamelCase = prompt if generate_kwargs is not None: __lowerCamelCase = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __lowerCamelCase = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) __lowerCamelCase = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self: Optional[Any] , UpperCamelCase_: Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCamelCase_: Union[str, Any] ): return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=None ): __lowerCamelCase = load_image(UpperCamelCase_ ) if prompt is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError( F'Received an invalid text input, got - {type(UpperCamelCase_ )} - but expected a single string. 
' """Note also that one single text can be provided for conditional image to text generation.""" ) __lowerCamelCase = self.model.config.model_type if model_type == "git": __lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework ) __lowerCamelCase = self.tokenizer(text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ).input_ids __lowerCamelCase = [self.tokenizer.cls_token_id] + input_ids __lowerCamelCase = torch.tensor(UpperCamelCase_ ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": __lowerCamelCase = self.image_processor(images=UpperCamelCase_ , header_text=UpperCamelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework ) __lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework ) model_inputs.update(UpperCamelCase_ ) else: raise ValueError(F'Model type {model_type} does not support conditional text generation' ) else: __lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __lowerCamelCase = None return model_inputs def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any]=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , UpperCamelCase_ ) and all(x is None for x in model_inputs["""input_ids"""] ) ): __lowerCamelCase = None if generate_kwargs is None: __lowerCamelCase = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __lowerCamelCase = model_inputs.pop(self.model.main_input_name ) __lowerCamelCase = self.model.generate(UpperCamelCase_ , **UpperCamelCase_ , **UpperCamelCase_ ) return model_outputs def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Dict ): __lowerCamelCase = [] for output_ids in model_outputs: __lowerCamelCase = { """generated_text""": self.tokenizer.decode( UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , ) } records.append(UpperCamelCase_ ) return records
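# Hedged usage sketch of the pipeline defined above via the high-level API.
# The checkpoint name and image path are illustrative assumptions:
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("path/to/image.jpg"))
# e.g. [{'generated_text': 'a cat sitting on a couch'}]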
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): __lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa] def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if length > 1: __lowerCamelCase = int(length / 2 ) for i in range(A__ , low + middle ): comp_and_swap(A__ , A__ , i + middle , A__ ) bitonic_merge(A__ , A__ , A__ , A__ ) bitonic_merge(A__ , low + middle , A__ , A__ ) def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if length > 1: __lowerCamelCase = int(length / 2 ) bitonic_sort(A__ , A__ , A__ , 1 ) bitonic_sort(A__ , low + middle , A__ , 0 ) bitonic_merge(A__ , A__ , A__ , A__ ) if __name__ == "__main__": UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
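# Clarifying note: the bitonic network above assumes len(array) is a power of
# two, since each merge halves the range evenly. A guard one might add before
# sorting (illustrative):
def is_power_of_two(n: int) -> bool:
    return n > 0 and (n & (n - 1)) == 0

assert is_power_of_two(8) and not is_power_of_two(6)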
80
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on __lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowerCamelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] __lowerCamelCase = {"""unk_token""": """<unk>"""} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase_ ) ) __lowerCamelCase = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], } __lowerCamelCase = os.path.join(self.tmpdirname , UpperCamelCase_ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , **UpperCamelCase_: int ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[int] , **UpperCamelCase_: Union[str, Any] ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: int , **UpperCamelCase_: List[Any] ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: int ): shutil.rmtree(self.tmpdirname ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ ) __lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __lowerCamelCase = self.get_image_processor(do_normalize=UpperCamelCase_ ) __lowerCamelCase = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""np""" ) __lowerCamelCase = processor(images=UpperCamelCase_ , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = """lower newer""" __lowerCamelCase = processor(text=UpperCamelCase_ , return_tensors="""np""" ) __lowerCamelCase = tokenizer(UpperCamelCase_ , return_tensors="""np""" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = """lower newer""" __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = """google/owlvit-base-patch32""" __lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = ["""cat""", """nasa badge"""] __lowerCamelCase = processor(text=UpperCamelCase_ ) __lowerCamelCase = 16 self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) 
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = """google/owlvit-base-patch32""" __lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = [["""cat""", """nasa badge"""], ["""person"""]] __lowerCamelCase = processor(text=UpperCamelCase_ ) __lowerCamelCase = 16 __lowerCamelCase = len(UpperCamelCase_ ) __lowerCamelCase = max([len(UpperCamelCase_ ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCAmelCase__ ( self: str ): __lowerCamelCase = """google/owlvit-base-patch32""" __lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = ["""cat""", """nasa badge"""] __lowerCamelCase = processor(text=UpperCamelCase_ ) __lowerCamelCase = 16 __lowerCamelCase = inputs["""input_ids"""] __lowerCamelCase = [ [4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(UpperCamelCase_ ) __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
80
from ... import PretrainedConfig UpperCAmelCase_ = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP UpperCAmelCase__ : Dict = 'nezha' def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = max_relative_position __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = classifier_dropout __lowerCamelCase = use_cache
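# Hedged usage sketch (the import path is an assumption based on the standard
# transformers layout for this config):
from transformers import NezhaConfig

config = NezhaConfig()
print(config.max_relative_position)  # 64 by default, per the __init__ above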
80
1
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path UpperCAmelCase_ = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) UpperCAmelCase_ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} UpperCAmelCase_ = 'zero2' UpperCAmelCase_ = 'zero3' UpperCAmelCase_ = [ZEROa, ZEROa] def lowerCamelCase__ ( A__ : str , A__ : Tuple , A__ : str ): '''simple docstring''' __lowerCamelCase = parameterized.to_safe_name("""_""".join(str(A__ ) for x in param.args ) ) return f'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test UpperCAmelCase_ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCamelCase__( __lowerCamelCase): @parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] ): self.run_and_check( stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) @require_torch_multi_gpu @parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] ): self.run_and_check( stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) @parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[int] ): self.run_and_check( stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) @require_torch_multi_gpu @parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: List[str] , UpperCamelCase_: Union[str, Any] ): self.run_and_check( stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: int = 10 , UpperCamelCase_: bool = True , UpperCamelCase_: bool = True , UpperCamelCase_: bool = True , ): __lowerCamelCase = models[model] __lowerCamelCase = self.run_trainer( stage=UpperCamelCase_ , model_name=UpperCamelCase_ , eval_steps=UpperCamelCase_ , num_train_epochs=1 , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) self.do_checks(UpperCamelCase_ ) return output_dir def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: str ,
UpperCamelCase_: int = 10 , UpperCamelCase_: int = 1 , UpperCamelCase_: bool = True , UpperCamelCase_: bool = True , ): __lowerCamelCase = self.get_auto_remove_tmp_dir("""./xxx""" , after=UpperCamelCase_ ) __lowerCamelCase = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(UpperCamelCase_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(["""--fp16"""] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __lowerCamelCase = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __lowerCamelCase = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __lowerCamelCase = self.get_launcher(UpperCamelCase_ ) __lowerCamelCase = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(UpperCamelCase_ , env=self.get_env() ) return output_dir def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Tuple=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) __lowerCamelCase = min(2 , get_gpu_count() ) if distributed else 1 return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
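A condensed sketch of the launcher-composition pattern this test uses: build the deepspeed CLI prefix, then append the training script and its arguments before handing the list to a subprocess runner. Script and config names here are hypothetical placeholders, not the test's actual fixtures:

import subprocess

def build_cmd(num_gpus: int, script: str, script_args: list[str], ds_config: str) -> list[str]:
    # deepspeed acts as the process launcher; --num_nodes 1 keeps the run single-node
    launcher = f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
    return launcher + [script] + script_args + ["--deepspeed", ds_config]

cmd = build_cmd(2, "run_asr.py", ["--output_dir", "out"], "ds_config_zero2.json")
print(" ".join(cmd))
# subprocess.run(cmd, check=True)  # only on a machine with DeepSpeed and GPUs available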
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__: def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ): if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ): if self.new_user_input: if overwrite: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' F'with: "{text}".' ) __lowerCamelCase = text else: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: __lowerCamelCase = text def lowerCAmelCase__ ( self: List[str] ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): self.generated_responses.append(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self: Union[str, Any] ): __lowerCamelCase = F'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): __lowerCamelCase = """user""" if is_user else """bot""" output += F'{name} >> {text} \n' return output @add_end_docstrings( __lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ): __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: 
__lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(UpperCamelCase_ ) return preprocess_params, forward_params, postprocess_params def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ): __lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1: return outputs[0] return outputs def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError("""ConversationalPipeline expects a Conversation as input""" ) if conversation.new_user_input is None: raise ValueError( F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ' """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ): __lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length ) __lowerCamelCase = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:] __lowerCamelCase = model_inputs.pop("""conversation""" ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ): __lowerCamelCase = model_outputs["""output_ids"""] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , ) __lowerCamelCase = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(UpperCamelCase_ ) return conversation def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ): __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) if len(UpperCamelCase_ ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
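A short end-to-end sketch of the conversational flow above, assuming the public transformers entry points (pipeline, Conversation) rather than the renamed classes; the model id is just a small example checkpoint:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("What's a good first Python project?")
conversation = chatbot(conversation)        # preprocess -> generate -> postprocess
print(conversation.generated_responses[-1])

# Later turns reuse the same object, so earlier inputs stay in the prompt context
conversation.add_user_input("Something I can finish in a weekend?")
conversation = chatbot(conversation)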
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def lowerCamelCase__ ( A__ : int = 8 ): '''simple docstring''' __lowerCamelCase = ascii_letters + digits + punctuation return "".join(secrets.choice(A__ ) for _ in range(A__ ) ) def lowerCamelCase__ ( A__ : str , A__ : int ): '''simple docstring''' i -= len(A__ ) __lowerCamelCase = i // 3 __lowerCamelCase = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) __lowerCamelCase = ( chars_incl + random(A__ , quotient + remainder ) + random(A__ , A__ ) + random(A__ , A__ ) ) __lowerCamelCase = list(A__ ) shuffle(A__ ) return "".join(A__ ) # random is a generalised function for letters, characters and numbers def lowerCamelCase__ ( A__ : str , A__ : int ): '''simple docstring''' return "".join(secrets.choice(A__ ) for _ in range(A__ ) ) def lowerCamelCase__ ( A__ : List[Any] , A__ : Optional[Any] ): '''simple docstring''' pass # Put your code here... def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any] ): '''simple docstring''' pass # Put your code here... def lowerCamelCase__ ( A__ : int , A__ : Union[str, Any] ): '''simple docstring''' pass # Put your code here... def lowerCamelCase__ ( A__ : str , A__ : int = 8 ): '''simple docstring''' if len(A__ ) < min_length: # Your Password must be at least 8 characters long return False __lowerCamelCase = any(char in ascii_uppercase for char in password ) __lowerCamelCase = any(char in ascii_lowercase for char in password ) __lowerCamelCase = any(char in digits for char in password ) __lowerCamelCase = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowercase # numbers, and special characters def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = int(input("""Please indicate the max length of your password: """ ).strip() ) __lowerCamelCase = input( """Please indicate the characters that must be in your password: """ ).strip() print("""Password generated:""" , password_generator(A__ ) ) print( """Alternative Password generated:""" , alternative_password_generator(A__ , A__ ) , ) print("""[If you are thinking of using this password, you had better save it.]""" ) if __name__ == "__main__": main()
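For comparison, a self-contained version of the strength check above with the same four character-class requirements (standalone helper name, not tied to the renamed functions):

from string import ascii_lowercase, ascii_uppercase, digits, punctuation

def is_strong_password(password: str, min_length: int = 8) -> bool:
    # Require the minimum length plus at least one character from each class
    if len(password) < min_length:
        return False
    classes = (ascii_uppercase, ascii_lowercase, digits, punctuation)
    return all(any(ch in cls for ch in password) for cls in classes)

assert is_strong_password("Aa1!aaaa")
assert not is_strong_password("weakpassword1")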
import math def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = 2 __lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment __lowerCamelCase = [True] * (end + 1) __lowerCamelCase = [] while start <= end: if temp[start] is True: in_prime.append(A__ ) for i in range(start * start , end + 1 , A__ ): __lowerCamelCase = False start += 1 prime += in_prime __lowerCamelCase = end + 1 __lowerCamelCase = min(2 * end , A__ ) while low <= n: __lowerCamelCase = [True] * (high - low + 1) for each in in_prime: __lowerCamelCase = math.floor(low / each ) * each if t < low: t += each for j in range(A__ , high + 1 , A__ ): __lowerCamelCase = False for j in range(len(A__ ) ): if temp[j] is True: prime.append(j + low ) __lowerCamelCase = high + 1 __lowerCamelCase = min(high + end , A__ ) return prime print(sieve(10**6))
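For cross-checking the segmented sieve on small bounds, here is the plain Sieve of Eratosthenes it generalizes (a standalone sketch with its own helper name):

def simple_sieve(n: int) -> list[int]:
    # Classic sieve: cross out multiples of each prime starting at p*p
    is_prime = [True] * (n + 1)
    is_prime[0:2] = [False, False]
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]

The segmented variant above trades this single O(n) boolean array for fixed-size windows of roughly sqrt(n), which keeps memory bounded for large n.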
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ = { 'configuration_xlm_roberta_xl': [ 'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaXLConfig', 'XLMRobertaXLOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaXLForCausalLM', 'XLMRobertaXLForMaskedLM', 'XLMRobertaXLForMultipleChoice', 'XLMRobertaXLForQuestionAnswering', 'XLMRobertaXLForSequenceClassification', 'XLMRobertaXLForTokenClassification', 'XLMRobertaXLModel', 'XLMRobertaXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : int = BartphoTokenizer UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[str] = True def lowerCAmelCase__ ( self: Tuple ): super().setUp() __lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] __lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowerCamelCase = {"""unk_token""": """<unk>"""} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ) with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(F'{token} {vocab_tokens[token]}\n' ) __lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): __lowerCamelCase = """This is a là test""" __lowerCamelCase = """This is a<unk><unk> test""" return input_text, output_text def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) __lowerCamelCase = """This is a là test""" __lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split() __lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase__ ( A__ : str ): '''simple docstring''' if not head: return True # split the list to two parts __lowerCamelCase, __lowerCamelCase = head.next, head while fast and fast.next: __lowerCamelCase = fast.next.next __lowerCamelCase = slow.next __lowerCamelCase = slow.next __lowerCamelCase = None # Don't forget here! But forget still works! # reverse the second part __lowerCamelCase = None while second: __lowerCamelCase = second.next __lowerCamelCase = node __lowerCamelCase = second __lowerCamelCase = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False __lowerCamelCase = node.next __lowerCamelCase = head.next return True def lowerCamelCase__ ( A__ : Union[str, Any] ): '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) __lowerCamelCase = __lowerCamelCase = __lowerCamelCase = head while fast and fast.next: __lowerCamelCase, __lowerCamelCase = fast.next.next, slow.next # 2. Push the second half into the stack __lowerCamelCase = [slow.val] while slow.next: __lowerCamelCase = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False __lowerCamelCase = cur.next return True def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' if not head or not head.next: return True __lowerCamelCase = {} __lowerCamelCase = 0 while head: if head.val in d: d[head.val].append(A__ ) else: __lowerCamelCase = [pos] __lowerCamelCase = head.next pos += 1 __lowerCamelCase = pos - 1 __lowerCamelCase = 0 for v in d.values(): if len(A__ ) % 2 != 0: middle += 1 else: __lowerCamelCase = 0 for i in range(0 , len(A__ ) ): if v[i] + v[len(A__ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
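The three checks above all assume a singly linked node with val and next attributes, but no node class is defined in the snippet. A minimal hypothetical node type plus the straightforward O(n)-space variant of the check, for quick experimentation:

class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

def from_list(values):
    # Build a linked list front-to-back by prepending in reverse order
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

def is_palindrome_simple(head) -> bool:
    vals = []
    while head:
        vals.append(head.val)
        head = head.next
    return vals == vals[::-1]

assert is_palindrome_simple(from_list([1, 2, 2, 1]))
assert not is_palindrome_simple(from_list([1, 2, 3]))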
def lowerCamelCase__ ( A__ : dict ): '''simple docstring''' __lowerCamelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowerCamelCase = set() return any( node not in visited and depth_first_search(A__ , A__ , A__ , A__ ) for node in graph ) def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ): '''simple docstring''' visited.add(A__ ) rec_stk.add(A__ ) for node in graph[vertex]: if node not in visited: if depth_first_search(A__ , A__ , A__ , A__ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(A__ ) return False if __name__ == "__main__": from doctest import testmod testmod()
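A compact standalone restatement of the same recursion-stack technique, handy for seeing the back-edge test in isolation (sample graphs are hypothetical):

def has_cycle(graph: dict) -> bool:
    visited, rec_stack = set(), set()

    def dfs(node) -> bool:
        visited.add(node)
        rec_stack.add(node)
        for nxt in graph.get(node, []):
            if nxt not in visited and dfs(nxt):
                return True
            if nxt in rec_stack:  # back edge: neighbor is still on the recursion stack
                return True
        rec_stack.remove(node)
        return False

    return any(node not in visited and dfs(node) for node in graph)

assert has_cycle({0: [1], 1: [2], 2: [0]})          # 0 -> 1 -> 2 -> 0
assert not has_cycle({0: [1, 2], 1: [2], 2: []})    # a DAG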
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class lowerCamelCase__: def __init__( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: List[Any]=13 , UpperCamelCase_: Dict=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: str=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: Optional[int]=99 , UpperCamelCase_: List[Any]=64 , UpperCamelCase_: Dict=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Any=4 , UpperCamelCase_: List[str]=37 , UpperCamelCase_: Tuple="gelu" , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: Optional[Any]=5_12 , UpperCamelCase_: List[str]=16 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: int=4 , UpperCamelCase_: List[Any]=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = embedding_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self: List[str] ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size 
, embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: str ): __lowerCamelCase = MegatronBertModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str ): __lowerCamelCase = MegatronBertForMaskedLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] ): __lowerCamelCase = MegatronBertForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str ): __lowerCamelCase = MegatronBertForNextSentencePrediction(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] ): __lowerCamelCase = MegatronBertForPreTraining(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , next_sentence_label=UpperCamelCase_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def 
lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] ): __lowerCamelCase = MegatronBertForQuestionAnswering(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = MegatronBertForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = MegatronBertForTokenClassification(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Dict ): __lowerCamelCase = self.num_choices __lowerCamelCase = MegatronBertForMultipleChoice(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Optional[int] = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, 
MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) UpperCAmelCase__ : int = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[int] = True # test_resize_embeddings = False UpperCAmelCase__ : Optional[Any] = False def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Tuple=False ): __lowerCamelCase = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) if return_labels: if model_class in get_values(UpperCamelCase_ ): __lowerCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ ) __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) return inputs_dict def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = MegatronBertModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 ) def lowerCAmelCase__ ( self: int ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCamelCase_ ) def lowerCamelCase__ ( A__ : int ): '''simple docstring''' return torch.tensor( A__ , dtype=torch.long , device=A__ , ) UpperCAmelCase_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase__( unittest.TestCase): @slow @unittest.skip("""Model is not available.""" ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = """nvidia/megatron-bert-uncased-345m""" if "MYDIR" in os.environ: __lowerCamelCase = 
os.path.join(os.environ["""MYDIR"""] , UpperCamelCase_ ) __lowerCamelCase = MegatronBertModel.from_pretrained(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.half() __lowerCamelCase = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): __lowerCamelCase = model(UpperCamelCase_ )[0] __lowerCamelCase = torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , UpperCamelCase_ ) __lowerCamelCase = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): __lowerCamelCase = output[0, ii, jj] __lowerCamelCase = expected[3 * ii + jj] __lowerCamelCase = """ii={} jj={} a={} b={}""".format(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) self.assertTrue(math.isclose(UpperCamelCase_ , UpperCamelCase_ , rel_tol=UpperCamelCase_ , abs_tol=UpperCamelCase_ ) , msg=UpperCamelCase_ )
from __future__ import annotations def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ): '''simple docstring''' __lowerCamelCase = sorted(numsa + numsa ) __lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
from __future__ import annotations def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): __lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa] def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if length > 1: __lowerCamelCase = int(length / 2 ) for i in range(A__ , low + middle ): comp_and_swap(A__ , A__ , i + middle , A__ ) bitonic_merge(A__ , A__ , A__ , A__ ) bitonic_merge(A__ , low + middle , A__ , A__ ) def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if length > 1: __lowerCamelCase = int(length / 2 ) bitonic_sort(A__ , A__ , A__ , 1 ) bitonic_sort(A__ , low + middle , A__ , 0 ) bitonic_merge(A__ , A__ , A__ , A__ ) if __name__ == "__main__": UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
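One property the demo above does not state: bitonic sort only guarantees a fully sorted result when the input length is a power of two. A small self-contained reformulation that makes the requirement explicit and checks itself against sorted (helper names are illustrative):

import random

def bitonic_merge_list(seq: list[int]) -> list[int]:
    # Sorts a bitonic sequence ascending via compare-and-swap across the halves
    n = len(seq)
    if n <= 1:
        return seq
    half = n // 2
    for i in range(half):
        if seq[i] > seq[i + half]:
            seq[i], seq[i + half] = seq[i + half], seq[i]
    return bitonic_merge_list(seq[:half]) + bitonic_merge_list(seq[half:])

def bitonic_sort_list(values: list[int]) -> list[int]:
    n = len(values)
    if n <= 1:
        return values
    assert n & (n - 1) == 0, "bitonic sort needs a power-of-two length"
    ascending = bitonic_sort_list(values[: n // 2])
    descending = bitonic_sort_list(values[n // 2 :])[::-1]  # reversed ascending half
    return bitonic_merge_list(ascending + descending)

data = random.sample(range(1000), 16)
assert bitonic_sort_list(data) == sorted(data)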
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __lowerCamelCase = get_activation("""gelu""" ) self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) ) self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __lowerCamelCase = get_activation("""gelu""" ) __lowerCamelCase = get_activation("""gelu_10""" ) __lowerCamelCase = torch_builtin(UpperCamelCase_ ) __lowerCamelCase = geluaa(UpperCamelCase_ ) __lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCAmelCase__ ( self: str ): get_activation("""gelu""" ) get_activation("""gelu_10""" ) get_activation("""gelu_fast""" ) get_activation("""gelu_new""" ) get_activation("""gelu_python""" ) get_activation("""gelu_pytorch_tanh""" ) get_activation("""linear""" ) get_activation("""mish""" ) get_activation("""quick_gelu""" ) get_activation("""relu""" ) get_activation("""sigmoid""" ) get_activation("""silu""" ) get_activation("""swish""" ) get_activation("""tanh""" ) with self.assertRaises(UpperCamelCase_ ): get_activation("""bogus""" ) with self.assertRaises(UpperCamelCase_ ): get_activation(UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = get_activation("""gelu""" ) __lowerCamelCase = 1 __lowerCamelCase = get_activation("""gelu""" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = acta.a
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase__( unittest.TestCase): @property def lowerCAmelCase__ ( self: int ): torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = PNDMScheduler() __lowerCamelCase = PNDMPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) pndm.to(UpperCamelCase_ ) pndm.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pndm(generator=UpperCamelCase_ , num_inference_steps=20 , output_type="""numpy""" ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pndm(generator=UpperCamelCase_ , num_inference_steps=20 , output_type="""numpy""" , return_dict=UpperCamelCase_ )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: int ): __lowerCamelCase = """google/ddpm-cifar10-32""" __lowerCamelCase = UNetaDModel.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = PNDMScheduler() __lowerCamelCase = PNDMPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) pndm.to(UpperCamelCase_ ) pndm.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pndm(generator=UpperCamelCase_ , output_type="""numpy""" ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowerCamelCase__( __lowerCamelCase): @slow @require_torch def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) __lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 1_28 __lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) __lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 ) __lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] __lowerCamelCase = outputs.attention_mask assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids ) assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCamelCase_: int ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , 
logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , ) # start training trainer.train()
def lowerCamelCase__ ( ): '''simple docstring''' for n in range(1 , 1000000 ): yield n * (n + 1) // 2 def lowerCamelCase__ ( A__ : List[str] ): '''simple docstring''' __lowerCamelCase = 1 __lowerCamelCase = 2 while i * i <= n: __lowerCamelCase = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def lowerCamelCase__ ( ): '''simple docstring''' return next(i for i in triangle_number_generator() if count_divisors(A__ ) > 500 ) if __name__ == "__main__": print(solution())
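The divisor count above relies on a standard identity: if n = p1^a1 * ... * pk^ak, then n has (a1 + 1) * ... * (ak + 1) divisors. A standalone sketch of the same routine with two sanity checks (28 = 2^2 * 7 has (2+1)(1+1) = 6 divisors; 76576500 is the well-known answer to Project Euler 12):

def count_divisors(n: int) -> int:
    count, p = 1, 2
    while p * p <= n:
        exponent = 0
        while n % p == 0:
            n //= p
            exponent += 1
        count *= exponent + 1
        p += 1
    if n > 1:
        count *= 2  # one leftover prime factor with exponent 1
    return count

assert count_divisors(28) == 6
assert count_divisors(76576500) > 500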
class lowerCamelCase__: # Public class to implement a graph def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): __lowerCamelCase = row __lowerCamelCase = col __lowerCamelCase = graph def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): # Checking all 8 elements surrounding nth element __lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1] __lowerCamelCase = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands. __lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )] __lowerCamelCase = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += 1 return count
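A quick standalone flood-fill equivalent for sanity-checking the class above on a small grid (iterative, 8-connected; the sample data is hypothetical):

def count_islands(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]

    def flood(r: int, c: int) -> None:
        # Iterative DFS over the 8 neighbors of every land cell
        stack = [(r, c)]
        while stack:
            i, j = stack.pop()
            if not (0 <= i < rows and 0 <= j < cols) or seen[i][j] or not grid[i][j]:
                continue
            seen[i][j] = True
            stack.extend((i + di, j + dj) for di in (-1, 0, 1) for dj in (-1, 0, 1))

    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] and not seen[r][c]:
                flood(r, c)
                count += 1
    return count

assert count_islands([[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) == 2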
from bisect import bisect from itertools import accumulate def lowerCamelCase__ ( A__ : Any , A__ : str , A__ : List[str] , A__ : Optional[int] ): '''simple docstring''' __lowerCamelCase = sorted(zip(A__ , A__ ) , key=lambda A__ : x[0] / x[1] , reverse=A__ ) __lowerCamelCase, __lowerCamelCase = [i[0] for i in r], [i[1] for i in r] __lowerCamelCase = list(accumulate(A__ ) ) __lowerCamelCase = bisect(A__ , A__ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
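For reference, the greedy idea the snippet implements: sort items by value density v/w, take whole items while they fit, and split the last one. A self-contained sketch with the classic textbook instance (hypothetical helper name):

def frac_knapsack(values: list[float], weights: list[float], capacity: float) -> float:
    # Highest value-per-weight first; the final item may be taken fractionally
    items = sorted(zip(values, weights), key=lambda vw: vw[0] / vw[1], reverse=True)
    total, remaining = 0.0, capacity
    for value, weight in items:
        if remaining <= 0:
            break
        take = min(weight, remaining)
        total += value * take / weight
        remaining -= take
    return total

assert frac_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0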
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase__ ( A__ : str ): '''simple docstring''' __lowerCamelCase = DPTConfig() if "large" in checkpoint_url: __lowerCamelCase = 1024 __lowerCamelCase = 4096 __lowerCamelCase = 24 __lowerCamelCase = 16 __lowerCamelCase = [5, 11, 17, 23] __lowerCamelCase = [256, 512, 1024, 1024] __lowerCamelCase = (1, 384, 384) if "ade" in checkpoint_url: __lowerCamelCase = True __lowerCamelCase = 150 __lowerCamelCase = """huggingface/label-files""" __lowerCamelCase = """ade20k-id2label.json""" __lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) ) __lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = [1, 150, 480, 480] return config, expected_shape def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' __lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): __lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: __lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: __lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" ) if "pos_embed" in name: __lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: __lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: __lowerCamelCase = name.replace("""proj""" , """projection""" ) if "blocks" in name: __lowerCamelCase = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: __lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name: __lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: __lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: __lowerCamelCase = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: __lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: __lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: __lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: __lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: __lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 __lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: __lowerCamelCase = name.replace("""out_conv""" , 
"""projection""" ) if "resConfUnit1" in name: __lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: __lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: __lowerCamelCase = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: __lowerCamelCase = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: __lowerCamelCase = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: __lowerCamelCase = name.replace("""bn""" , """batch_norm""" ) if "head" in name: __lowerCamelCase = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: __lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: __lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" ) return name def lowerCamelCase__ ( A__ : Tuple , A__ : Any ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) __lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase = in_proj_weight[: config.hidden_size, :] __lowerCamelCase = in_proj_bias[: config.hidden_size] __lowerCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase = in_proj_weight[ -config.hidden_size :, : ] __lowerCamelCase 
= in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ ) # load original state_dict from URL __lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(A__ ) # rename keys for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(A__ ) __lowerCamelCase = val # read in qkv matrices read_in_q_k_v(A__ , A__ ) # load HuggingFace model __lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ ) model.load_state_dict(A__ ) model.eval() # Check outputs on an image __lowerCamelCase = 480 if """ade""" in checkpoint_url else 384 __lowerCamelCase = DPTImageProcessor(size=A__ ) __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(A__ , return_tensors="""pt""" ) # forward pass __lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth # Assert logits __lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] ) if "ade" in checkpoint_url: __lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] ) assert outputs.shape == torch.Size(A__ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , A__ ) ) Path(A__ ).mkdir(exist_ok=A__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(A__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(A__ ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , ) image_processor.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) UpperCAmelCase_ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Dict = 'char' UpperCAmelCase__ : Union[str, Any] = 'bpe' UpperCAmelCase__ : str = 'wp' UpperCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Optional[int] = ['image_processor', 'char_tokenizer'] UpperCAmelCase__ : int = 'ViTImageProcessor' UpperCAmelCase__ : Any = 'MgpstrTokenizer' def __init__( self: Union[str, Any] , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Any=None , **UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCamelCase_ , ) __lowerCamelCase = kwargs.pop("""feature_extractor""" ) __lowerCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) __lowerCamelCase = tokenizer __lowerCamelCase = AutoTokenizer.from_pretrained("""gpt2""" ) __lowerCamelCase = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(UpperCamelCase_ , UpperCamelCase_ ) def __call__( self: Optional[Any] , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Optional[Any]=None , **UpperCamelCase_: Union[str, Any] ): if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: __lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) if text is not None: __lowerCamelCase = self.char_tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) if text is None: return inputs elif images is None: return encodings else: __lowerCamelCase = encodings["""input_ids"""] return inputs def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Any ): __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = sequences __lowerCamelCase = char_preds.size(0 ) __lowerCamelCase, __lowerCamelCase = self._decode_helper(UpperCamelCase_ , """char""" ) __lowerCamelCase, __lowerCamelCase = self._decode_helper(UpperCamelCase_ , """bpe""" ) __lowerCamelCase, __lowerCamelCase = self._decode_helper(UpperCamelCase_ , """wp""" ) __lowerCamelCase = [] __lowerCamelCase = [] for i in range(UpperCamelCase_ ): __lowerCamelCase = [char_scores[i], bpe_scores[i], wp_scores[i]] __lowerCamelCase = [char_strs[i], bpe_strs[i], wp_strs[i]] __lowerCamelCase = scores.index(max(UpperCamelCase_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __lowerCamelCase = {} __lowerCamelCase = final_strs __lowerCamelCase = final_scores __lowerCamelCase = char_strs __lowerCamelCase = bpe_strs __lowerCamelCase = wp_strs return out def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple ): if format == DecodeType.CHARACTER: __lowerCamelCase = self.char_decode __lowerCamelCase = 1 __lowerCamelCase = """[s]""" elif format == DecodeType.BPE: __lowerCamelCase = self.bpe_decode __lowerCamelCase = 2 __lowerCamelCase = 
"""#""" elif format == DecodeType.WORDPIECE: __lowerCamelCase = self.wp_decode __lowerCamelCase = 1_02 __lowerCamelCase = """[SEP]""" else: raise ValueError(F'Format {format} is not supported.' ) __lowerCamelCase, __lowerCamelCase = [], [] __lowerCamelCase = pred_logits.size(0 ) __lowerCamelCase = pred_logits.size(1 ) __lowerCamelCase, __lowerCamelCase = pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase_ , sorted=UpperCamelCase_ ) __lowerCamelCase = preds_index.view(-1 , UpperCamelCase_ )[:, 1:] __lowerCamelCase = decoder(UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase = torch.nn.functional.softmax(UpperCamelCase_ , dim=2 ).max(dim=2 ) __lowerCamelCase = preds_max_prob[:, 1:] for index in range(UpperCamelCase_ ): __lowerCamelCase = preds_str[index].find(UpperCamelCase_ ) __lowerCamelCase = preds_str[index][:pred_eos] __lowerCamelCase = preds_index[index].cpu().tolist() __lowerCamelCase = pred_index.index(UpperCamelCase_ ) if eos_token in pred_index else -1 __lowerCamelCase = preds_max_prob[index][: pred_eos_index + 1] __lowerCamelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(UpperCamelCase_ ) conf_scores.append(UpperCamelCase_ ) return dec_strs, conf_scores def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict ): __lowerCamelCase = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase_ )] return decode_strs def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any ): return self.bpe_tokenizer.batch_decode(UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any ): __lowerCamelCase = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase_ )] return decode_strs
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = 'bert' def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True 
, UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ): super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout class lowerCamelCase__( __lowerCamelCase): @property def lowerCAmelCase__ ( self: Any ): if self.task == "multiple-choice": __lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __lowerCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand UpperCAmelCase_ = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) UpperCAmelCase_ = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) UpperCAmelCase_ = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) UpperCAmelCase_ = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) UpperCAmelCase_ = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) UpperCAmelCase_ = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), 
('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) UpperCAmelCase_ = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) ) __lowerCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] __lowerCamelCase, __lowerCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase__ ( A__ : int = 100 ): '''simple docstring''' return (generate_random_hand() for _ in range(A__ )) @pytest.mark.parametrize("""hand, expected""" , A__ ) def lowerCamelCase__ ( A__ : Optional[Any] , A__ : int ): '''simple docstring''' assert PokerHand(A__ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , A__ ) def lowerCamelCase__ ( A__ : List[str] , A__ : Optional[Any] ): '''simple docstring''' assert PokerHand(A__ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , A__ ) def lowerCamelCase__ ( A__ : List[Any] , A__ : Any , A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = PokerHand(A__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , A__ ) def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int] ): '''simple docstring''' assert PokerHand(A__ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , A__ ) def lowerCamelCase__ ( A__ : int , A__ : str ): '''simple docstring''' assert PokerHand(A__ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , A__ ) def lowerCamelCase__ ( A__ : Optional[int] , A__ : List[str] , A__ : Tuple ): '''simple docstring''' assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def lowerCamelCase__ ( A__ : int , A__ : Union[str, Any] , A__ : str ): '''simple docstring''' assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS] __lowerCamelCase = poker_hands.copy() shuffle(A__ ) __lowerCamelCase = chain(sorted(A__ ) ) for index, hand in enumerate(A__ ): assert hand == poker_hands[index] def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=A__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = PokerHand("""2C 4S AS 3D 5C""" ) __lowerCamelCase = True __lowerCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = 0 __lowerCamelCase = os.path.abspath(os.path.dirname(A__ ) ) __lowerCamelCase = os.path.join(A__ , """poker_hands.txt""" ) with open(A__ ) as file_hand: for line in file_hand: __lowerCamelCase = line[:14].strip() __lowerCamelCase = line[15:].strip() __lowerCamelCase, __lowerCamelCase = PokerHand(A__ ), PokerHand(A__ ) __lowerCamelCase = player.compare_with(A__ ) if output == "Win": answer += 1 assert answer == 376
from __future__ import annotations from math import ceil, floor, sqrt def lowerCamelCase__ ( A__ : int = 2000000 ): '''simple docstring''' __lowerCamelCase = [0] __lowerCamelCase = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target __lowerCamelCase = 0 # the area corresponding to the grid that gives the product closest to target __lowerCamelCase = 0 # an estimate of b, using the quadratic formula __lowerCamelCase = 42 # the largest integer less than b_estimate __lowerCamelCase = 42 # the smallest integer greater than b_estimate __lowerCamelCase = 42 # the triangle number corresponding to b_floor __lowerCamelCase = 42 # the triangle number corresponding to b_ceil __lowerCamelCase = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): __lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 __lowerCamelCase = floor(A__ ) __lowerCamelCase = ceil(A__ ) __lowerCamelCase = triangle_numbers[b_floor] __lowerCamelCase = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): __lowerCamelCase = triangle_b_first_guess * triangle_a __lowerCamelCase = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): __lowerCamelCase = triangle_b_second_guess * triangle_a __lowerCamelCase = idx_a * b_ceil return area if __name__ == "__main__": print(f"""{solution() = }""")
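# Derivation note for the b estimate used in the loop above: with
# T(n) = n * (n + 1) / 2 and the goal T(a) * T(b) ~= target, solving
# b * (b + 1) / 2 = target / T(a) with the quadratic formula gives
#     b ~= (-1 + sqrt(1 + 8 * target / T(a))) / 2
# which is exactly the expression computed per iteration.
from math import sqrt


def estimate_b(target: int, triangle_a: int) -> float:
    return (-1 + sqrt(1 + 8 * target / triangle_a)) / 2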
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__( nn.Module): def __init__( self: Tuple , UpperCamelCase_: Any ): super().__init__() __lowerCamelCase = torchvision.models.resnetaaa(pretrained=UpperCamelCase_ ) __lowerCamelCase = list(model.children() )[:-2] __lowerCamelCase = nn.Sequential(*UpperCamelCase_ ) __lowerCamelCase = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int ): # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 __lowerCamelCase = self.pool(self.model(UpperCamelCase_ ) ) __lowerCamelCase = torch.flatten(UpperCamelCase_ , start_dim=2 ) __lowerCamelCase = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__( __lowerCamelCase): def __init__( self: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] ): __lowerCamelCase = [json.loads(UpperCamelCase_ ) for l in open(UpperCamelCase_ )] __lowerCamelCase = os.path.dirname(UpperCamelCase_ ) __lowerCamelCase = tokenizer __lowerCamelCase = labels __lowerCamelCase = len(UpperCamelCase_ ) __lowerCamelCase = max_seq_length __lowerCamelCase = transforms def __len__( self: int ): return len(self.data ) def __getitem__( self: List[str] , UpperCamelCase_: Optional[Any] ): __lowerCamelCase = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase_ ) ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = sentence[0], sentence[1:-1], sentence[-1] __lowerCamelCase = sentence[: self.max_seq_length] __lowerCamelCase = torch.zeros(self.n_classes ) __lowerCamelCase = 1 __lowerCamelCase = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __lowerCamelCase = self.transforms(UpperCamelCase_ ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = [len(row["""sentence"""] ) for row in batch] __lowerCamelCase, __lowerCamelCase = len(A__ ), max(A__ ) __lowerCamelCase = torch.zeros(A__ , A__ , dtype=torch.long ) __lowerCamelCase = torch.zeros(A__ , A__ , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(A__ , A__ ) ): __lowerCamelCase = input_row["""sentence"""] __lowerCamelCase = 1 __lowerCamelCase = torch.stack([row["""image"""] for row in batch] ) __lowerCamelCase = torch.stack([row["""label"""] for row in batch] ) __lowerCamelCase = torch.stack([row["""image_start_token"""] for row in batch] ) __lowerCamelCase = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase__ ( ): '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", 
"Biography", "Film-Noir", ] def lowerCamelCase__ ( ): '''simple docstring''' return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet, attn in zip(self.resnets , self.attentions ): __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet in self.resnets: __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def 
lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ): for resnet in self.resnets: # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: int ): # there is always at least one resnet __lowerCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , 
out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __lowerCamelCase = [] for _ in range(self.num_layers ): __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ): __lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) return hidden_states
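# Shape sketch for the skip connections in the up blocks above: each resnet
# consumes the decoder activation concatenated with the matching encoder
# residual along the channel axis (the last axis in Flax's NHWC layout).
# The shapes below are illustrative placeholders, not real model sizes.
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 8, 8, 320))      # decoder features
res_hidden_states = jnp.zeros((1, 8, 8, 320))  # popped encoder residual
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
assert merged.shape == (1, 8, 8, 640)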
def lowerCamelCase__ ( A__ : list ): '''simple docstring''' if len(A__ ) <= 1: return [tuple(A__ )] __lowerCamelCase = [] def generate(A__ : int , A__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , A__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even __lowerCamelCase, __lowerCamelCase = arr[k - 1], arr[i] else: # k is odd __lowerCamelCase, __lowerCamelCase = arr[k - 1], arr[0] generate(k - 1 , A__ ) generate(len(A__ ) , A__ ) return res if __name__ == "__main__": UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase_ = [int(item) for item in user_input.split(',')] print(heaps(arr))
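# Cross-check sketch for the permutation generator above: Heap's algorithm
# produces all n! orderings by swapping a single pair of elements between
# consecutive permutations, so its output must match itertools.permutations as
# a multiset. `heaps_fn` stands for the generator defined above, whose original
# name this dump has rewritten.
from itertools import permutations


def matches_itertools(heaps_fn, arr: list) -> bool:
    return sorted(heaps_fn(list(arr))) == sorted(permutations(arr))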
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = ' Hello world! cécé herlolip' UpperCAmelCase_ = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = dct.pop(A__ ) __lowerCamelCase = val def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' __lowerCamelCase = torch.load(A__ , map_location="""cpu""" ) __lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval() hub_interface.model.load_state_dict(sd["""model"""] ) return hub_interface def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ ) __lowerCamelCase = emb.weight.data return lin_layer @torch.no_grad() def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ): '''simple docstring''' if not os.path.exists(A__ ): __lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval() else: __lowerCamelCase = load_xsum_checkpoint(A__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: __lowerCamelCase = checkpoint_path.replace(""".""" , """-""" ) __lowerCamelCase = BartConfig.from_pretrained(A__ ) __lowerCamelCase = bart.encode(A__ ).unsqueeze(0 ) __lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 ) if not torch.eq(A__ , A__ ).all(): raise ValueError( f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' ) if checkpoint_path == "bart.large.mnli": __lowerCamelCase = bart.state_dict() remove_ignore_keys_(A__ ) __lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""] for src, dest in mnli_rename_keys: rename_key(A__ , A__ , A__ ) __lowerCamelCase = BartForSequenceClassification(A__ ).eval() model.load_state_dict(A__ ) __lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ ) __lowerCamelCase = model(A__ )[0] # logits else: # no classification heads to worry about __lowerCamelCase = bart.model.state_dict() remove_ignore_keys_(A__ ) __lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""] __lowerCamelCase = bart.extract_features(A__ ) if hf_checkpoint_name == "facebook/bart-large": __lowerCamelCase = BartModel(A__ 
).eval() model.load_state_dict(A__ ) __lowerCamelCase = model(A__ ).model[0] else: __lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt model.model.load_state_dict(A__ ) if hasattr(A__ , """lm_head""" ): __lowerCamelCase = make_linear_from_emb(model.model.shared ) __lowerCamelCase = model.model(A__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) UpperCAmelCase_ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
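# Sketch of the weight-tying helper used above: `make_linear_from_emb` builds
# a bias-free Linear whose weight is taken from the embedding matrix, so the
# LM head scores each hidden state against every token embedding.
import torch
from torch import nn


def make_linear_from_emb_sketch(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer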
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument UpperCAmelCase_ = { '/attention/': '/0/SelfAttention/', '/self_attention/': '/0/SelfAttention/', '/encoder_decoder_attention/': '/1/EncDecAttention/', 'value': 'v', 'query': 'q', 'key': 'k', 'out': 'o', 'pre_self_attention_layer_norm': '0/layer_norm', 'pre_cross_attention_layer_norm': '1/layer_norm', 'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong 'token_embedder': 'shared', 'encoder_norm': 'final_layer_norm', 'decoder_norm': 'final_layer_norm', 'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight', 'router/router_weights/w/': 'router/classifier/', 'roer/roer_weights/w/': 'router/classifier/', 'logits_dense': 'lm_head', } def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' __lowerCamelCase = list(s_dict.keys() ) for key in keys: __lowerCamelCase = R""".*/layers_(\d+)""" __lowerCamelCase = key if re.match(A__ , A__ ): __lowerCamelCase = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , A__ ) __lowerCamelCase = R"""(encoder|decoder)\/""" if re.match(A__ , A__ ): __lowerCamelCase = re.match(A__ , A__ ).groups() if groups[0] == "encoder": __lowerCamelCase = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , A__ ) __lowerCamelCase = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , A__ ) elif groups[0] == "decoder": __lowerCamelCase = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , A__ ) __lowerCamelCase = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , A__ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: __lowerCamelCase = new_key.replace(A__ , A__ ) print(f'{key} -> {new_key}' ) __lowerCamelCase = s_dict.pop(A__ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __lowerCamelCase = s_dict[ """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __lowerCamelCase = s_dict[ """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: __lowerCamelCase = s_dict[key].shape[0] __lowerCamelCase = s_dict[key] for idx in range(A__ ): __lowerCamelCase = expert_weihts[idx] print(f'{key} -> {key.replace("expert/" , "nested fstring" )}' ) s_dict.pop(A__ ) return s_dict UpperCAmelCase_ = { 'NUM_ENCODER_LAYERS': 'num_layers', 'NUM_DECODER_LAYERS': 'num_decoder_layers', 'NUM_HEADS': 'num_heads', 'HEAD_DIM': 'd_kv', 'EMBED_DIM': 'd_model', 'MLP_DIM': 'd_ff', 'NUM_SELECTED_EXPERTS': 'num_selected_experts', 'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers', 'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers', 'dense.MlpBlock.activations': 'feed_forward_proj', } def lowerCamelCase__ ( A__ : List[str] , A__ : Tuple ): '''simple docstring''' import regex as re with open(A__ , """r""" ) as f: __lowerCamelCase = f.read() __lowerCamelCase = re.findall(R"""(.*) = ([0-9.]*)""" , A__ ) __lowerCamelCase = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": __lowerCamelCase = float(A__ ) if """.""" in value else int(A__ ) __lowerCamelCase = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , A__ )[0] __lowerCamelCase = str(activation[1] ) __lowerCamelCase = num_experts __lowerCamelCase = SwitchTransformersConfig(**A__ ) return config def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[Any] , A__ : Dict=None , A__ : List[Any]="./" , A__ : Optional[int]=8 ): '''simple docstring''' print(f'Loading flax weights from : {flax_checkpoint_path}' ) __lowerCamelCase = checkpoints.load_tax_checkpoint(A__ ) if gin_file is not None: __lowerCamelCase = convert_gin_to_config(A__ , A__ ) else: __lowerCamelCase = SwitchTransformersConfig.from_pretrained(A__ ) __lowerCamelCase = SwitchTransformersForConditionalGeneration(A__ ) __lowerCamelCase = flax_params["""target"""] __lowerCamelCase = flatten_dict(A__ , sep="""/""" ) __lowerCamelCase = rename_keys(A__ ) __lowerCamelCase = unflatten_dict(A__ , sep="""/""" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(A__ , A__ ) print(f'Save PyTorch model to {pytorch_dump_path}' ) pt_model.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the' ' model architecture. If not provided, a `gin_file` has to be provided.' ), ) parser.add_argument( '--gin_file', default=None, type=str, required=False, help='Path to the gin config file. If not provided, a `config_file` has to be passed ', ) parser.add_argument( '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.' ) parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts') UpperCAmelCase_ = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class lowerCamelCase__: def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_input_mask __lowerCamelCase = use_labels __lowerCamelCase = use_mc_token_ids __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = self.vocab_size - 1 def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None if self.use_mc_token_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() __lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase__ ( self: Dict ): return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def 
lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ): __lowerCamelCase = CTRLModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ ) model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ): __lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__ : int = ( { 'feature-extraction': CTRLModel, 'text-classification': CTRLForSequenceClassification, 'text-generation': CTRLLMHeadModel, 'zero-shot': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = CTRLModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def lowerCAmelCase__ ( self: Optional[int] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase__ ( self: List[Any] ): pass @slow def lowerCAmelCase__ ( self: Optional[Any] ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self: Optional[Any] ): pass @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(UpperCamelCase_ ) __lowerCamelCase = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is __lowerCamelCase = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__: def __init__( self: Any , UpperCamelCase_: int , UpperCamelCase_: Dict=13 , UpperCamelCase_: Union[str, Any]=7 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: int=True , UpperCamelCase_: int=False , UpperCamelCase_: List[Any]=False , UpperCamelCase_: List[Any]=False , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: int=99 , UpperCamelCase_: List[Any]=0 , UpperCamelCase_: Optional[int]=32 , UpperCamelCase_: str=5 , UpperCamelCase_: int=4 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: str=5_12 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: int=2 , UpperCamelCase_: str=4 , UpperCamelCase_: Optional[Any]="last" , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=0 , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_lengths __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = gelu_activation __lowerCamelCase = sinusoidal_embeddings __lowerCamelCase = causal __lowerCamelCase = asm __lowerCamelCase = n_langs __lowerCamelCase = vocab_size __lowerCamelCase = n_special __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = summary_type __lowerCamelCase = use_proj __lowerCamelCase = scope __lowerCamelCase = bos_token_id def lowerCAmelCase__ ( self: int ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_input_lengths: __lowerCamelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , 2 ).float() __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return ( config, input_ids, token_type_ids, 
input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCAmelCase__ ( self: Optional[int] ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , ): __lowerCamelCase = XLMModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , lengths=UpperCamelCase_ , langs=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , langs=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: List[Any] , ): __lowerCamelCase = XLMWithLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , ): __lowerCamelCase = XLMForQuestionAnsweringSimple(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ ) __lowerCamelCase = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Any , ): __lowerCamelCase = XLMForQuestionAnswering(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ ) __lowerCamelCase = model( UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , p_mask=UpperCamelCase_ , ) __lowerCamelCase = model( UpperCamelCase_ , 
start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , ) ((__lowerCamelCase), ) = result_with_labels.to_tuple() __lowerCamelCase = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ ) ((__lowerCamelCase), ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int] , ): __lowerCamelCase = XLMForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: str , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] , ): __lowerCamelCase = self.num_labels __lowerCamelCase = XLMForTokenClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: str , ): __lowerCamelCase = self.num_choices __lowerCamelCase = XLMForMultipleChoice(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": 
token_type_ids, """lengths""": input_lengths} return config, inputs_dict @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : int = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCAmelCase__ : List[Any] = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCAmelCase__ : Dict = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict=False ): __lowerCamelCase = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ ) return inputs_dict def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = XLMModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , emb_dim=37 ) def lowerCAmelCase__ ( self: Tuple ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: List[Any]=False , UpperCamelCase_: List[Any]=1 ): self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertListEqual( [isinstance(UpperCamelCase_ , UpperCamelCase_ ) for iter_attentions in attentions] , [True] * len(UpperCamelCase_ ) ) self.assertEqual(len(UpperCamelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(UpperCamelCase_ ): # adds PAD dummy token __lowerCamelCase = min_length + idx + 1 __lowerCamelCase = min_length + idx + 1 __lowerCamelCase = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCamelCase_ ) ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=False , UpperCamelCase_: Union[str, Any]=1 ): self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertListEqual( [isinstance(UpperCamelCase_ , UpperCamelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(UpperCamelCase_ ) , ) self.assertEqual(len(UpperCamelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(UpperCamelCase_ ): # adds PAD dummy token __lowerCamelCase = min_length + idx + 1 __lowerCamelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCamelCase_ ) , ) pass @slow def lowerCAmelCase__ ( self: Union[str, Any] ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = XLMModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_torch class lowerCamelCase__( unittest.TestCase): @slow def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(UpperCamelCase_ ) __lowerCamelCase = torch.tensor([[14, 4_47]] , dtype=torch.long , device=UpperCamelCase_ ) # the president __lowerCamelCase = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCamelCase_ )
80
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
80
1
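A quick, illustrative sanity check for the sieve-based solution above (the primes below 10 are 2, 3, 5 and 7):

assert solution(10) == 17  # 2 + 3 + 5 + 7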
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
80
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients; fall back to lower orders while fewer
        # than four model outputs have been collected
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs):
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
80
1
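A minimal, hypothetical denoising loop for the IPNDM scheduler above; `unet` is a stand-in for any model with the usual diffusers call signature and is not defined in that file:

import torch

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)  # start from pure noise
for t in scheduler.timesteps:
    model_output = unet(sample, t).sample  # hypothetical UNet
    sample = scheduler.step(model_output, t, sample).prev_sample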
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
80
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune the walk in place: skip "scripts" and hidden/underscore directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
80
1
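A hedged usage sketch for the document-QA tool above; the image file name is hypothetical and the call relies on the generic `PipelineTool` call path (setup, encode, forward, decode):

from PIL import Image

tool = DocumentQuestionAnsweringTool()
document = Image.open("invoice.png")  # hypothetical scanned document
print(tool(document, "What is the total amount?"))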
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the line through which the beam is reflected
    # outgoing_gradient = gradient of the reflected line
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
80
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
80
1
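A one-line check for the mean helper above:

assert mean([3, 6, 9]) == 6.0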
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase__: def __init__( self: int , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any]=13 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=4 , UpperCamelCase_: int=[10, 20, 30, 40] , UpperCamelCase_: List[Any]=[2, 2, 3, 2] , UpperCamelCase_: str=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: Dict=37 , UpperCamelCase_: List[Any]="gelu" , UpperCamelCase_: Any=10 , UpperCamelCase_: str=0.02 , UpperCamelCase_: Optional[Any]=["stage2", "stage3", "stage4"] , UpperCamelCase_: Union[str, Any]=[2, 3, 4] , UpperCamelCase_: List[str]=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = num_stages __lowerCamelCase = hidden_sizes __lowerCamelCase = depths __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = num_labels __lowerCamelCase = initializer_range __lowerCamelCase = out_features __lowerCamelCase = out_indices __lowerCamelCase = scope def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self: Dict ): return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: List[Any] , UpperCamelCase_: str ): __lowerCamelCase = ConvNextModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: int , UpperCamelCase_: str ): __lowerCamelCase = ConvNextForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] ): 
__lowerCamelCase = ConvNextBackbone(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __lowerCamelCase = None __lowerCamelCase = ConvNextBackbone(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs __lowerCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Dict = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) UpperCAmelCase__ : Dict = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : int = False UpperCAmelCase__ : List[Any] = False UpperCAmelCase__ : Union[str, Any] = False def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = ConvNextModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def lowerCAmelCase__ ( self: Union[str, Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase__ ( self: Union[str, Any] ): return @unittest.skip(reason="""ConvNext does not use inputs_embeds""" ) def lowerCAmelCase__ ( self: Optional[int] ): pass @unittest.skip(reason="""ConvNext does not support input and output embeddings""" ) def lowerCAmelCase__ ( self: List[Any] ): pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""" ) def lowerCAmelCase__ ( self: Any ): pass def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(UpperCamelCase_ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): def check_hidden_states_output(UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Tuple ): __lowerCamelCase = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): __lowerCamelCase = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) __lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCamelCase = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self: List[str] ): for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = ConvNextModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__( unittest.TestCase): @cached_property def lowerCAmelCase__ ( self: Dict ): return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None @slow def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(UpperCamelCase_ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**UpperCamelCase_ ) # verify the logits __lowerCamelCase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) __lowerCamelCase = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) ) @require_torch class lowerCamelCase__( unittest.TestCase , __lowerCamelCase): UpperCAmelCase__ : int = (ConvNextBackbone,) if is_torch_available() else () UpperCAmelCase__ : Tuple = ConvNextConfig UpperCAmelCase__ : List[str] = False def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = ConvNextModelTester(self )
80
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
80
1
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase_ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize UpperCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' UpperCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' UpperCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowerCamelCase__( datasets.Metric): def lowerCAmelCase__ ( self: Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[ """https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""", """https://en.wikipedia.org/wiki/METEOR""", ] , ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str ): import nltk nltk.download("""wordnet""" ) if NLTK_VERSION >= version.Version("""3.6.5""" ): nltk.download("""punkt""" ) if NLTK_VERSION >= version.Version("""3.6.6""" ): nltk.download("""omw-1.4""" ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Any=0.9 , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: Union[str, Any]=0.5 ): if NLTK_VERSION >= version.Version("""3.6.5""" ): __lowerCamelCase = [ meteor_score.single_meteor_score( word_tokenize(UpperCamelCase_ ) , word_tokenize(UpperCamelCase_ ) , alpha=UpperCamelCase_ , beta=UpperCamelCase_ , gamma=UpperCamelCase_ ) for ref, pred in zip(UpperCamelCase_ , UpperCamelCase_ ) ] else: __lowerCamelCase = [ meteor_score.single_meteor_score(UpperCamelCase_ , UpperCamelCase_ , alpha=UpperCamelCase_ , beta=UpperCamelCase_ , gamma=UpperCamelCase_ ) for ref, pred in zip(UpperCamelCase_ , UpperCamelCase_ ) ] return {"meteor": np.mean(UpperCamelCase_ )}
80
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
80
1
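An illustrative run of the bitonic sort above; note that the classic bitonic network assumes a power-of-two input length:

data = [7, 3, 1, 8, 6, 2, 5, 4]
bitonic_sort(data, 0, len(data), 1)  # direction 1 = ascending
assert data == [1, 2, 3, 4, 5, 6, 7, 8]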
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class lowerCamelCase__: def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_input_mask __lowerCamelCase = use_labels __lowerCamelCase = use_mc_token_ids __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = self.vocab_size - 1 def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None if self.use_mc_token_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() __lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase__ ( self: Dict ): return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def 
lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ): __lowerCamelCase = CTRLModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ ) model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ): __lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__ : int = ( { 'feature-extraction': CTRLModel, 'text-classification': CTRLForSequenceClassification, 'text-generation': CTRLLMHeadModel, 'zero-shot': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = CTRLModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def lowerCAmelCase__ ( self: Optional[int] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase__ ( self: List[Any] ): pass @slow def lowerCAmelCase__ ( self: Optional[Any] ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self: Optional[Any] ): pass @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(UpperCamelCase_ ) __lowerCamelCase = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is __lowerCamelCase = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
80
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
80
1
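A small sketch instantiating the Nezha configuration above with its defaults; it assumes a transformers installation that exports NezhaConfig:

from transformers import NezhaConfig

config = NezhaConfig()
assert config.model_type == "nezha"
assert config.max_relative_position == 64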
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { 'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig'] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['RemBertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['RemBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertLayer', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertLayer', 'TFRemBertModel', 'TFRemBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
80
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__: def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ): if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ): if self.new_user_input: if overwrite: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' F'with: "{text}".' ) __lowerCamelCase = text else: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: __lowerCamelCase = text def lowerCAmelCase__ ( self: List[str] ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): self.generated_responses.append(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self: Union[str, Any] ): __lowerCamelCase = F'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): __lowerCamelCase = """user""" if is_user else """bot""" output += F'{name} >> {text} \n' return output @add_end_docstrings( __lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ): __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: 
__lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(UpperCamelCase_ ) return preprocess_params, forward_params, postprocess_params def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ): __lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1: return outputs[0] return outputs def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError("""ConversationalPipeline expects a Conversation as input""" ) if conversation.new_user_input is None: raise ValueError( F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ' """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ): __lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length ) __lowerCamelCase = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:] __lowerCamelCase = model_inputs.pop("""conversation""" ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ): __lowerCamelCase = model_outputs["""output_ids"""] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , ) __lowerCamelCase = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(UpperCamelCase_ ) return conversation def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ): __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) if len(UpperCamelCase_ ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
80
1
def lowerCamelCase__ ( A__ : int ): '''simple docstring''' return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') UpperCAmelCase_ = int(input('Enter number: ').strip()) print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
80
import math def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = 2 __lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment __lowerCamelCase = [True] * (end + 1) __lowerCamelCase = [] while start <= end: if temp[start] is True: in_prime.append(A__ ) for i in range(start * start , end + 1 , A__ ): __lowerCamelCase = False start += 1 prime += in_prime __lowerCamelCase = end + 1 __lowerCamelCase = min(2 * end , A__ ) while low <= n: __lowerCamelCase = [True] * (high - low + 1) for each in in_prime: __lowerCamelCase = math.floor(low / each ) * each if t < low: t += each for j in range(A__ , high + 1 , A__ ): __lowerCamelCase = False for j in range(len(A__ ) ): if temp[j] is True: prime.append(j + low ) __lowerCamelCase = high + 1 __lowerCamelCase = min(high + end , A__ ) return prime print(sieve(10**6))
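# Note: the sample above is a segmented sieve. It first collects the base primes up
# to sqrt(n), then marks composites one window of roughly sqrt(n) cells at a time,
# so memory stays near O(sqrt(n)) instead of the O(n) a plain Sieve of Eratosthenes needs.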
80
1
UpperCAmelCase_ = tuple[float, float, float] UpperCAmelCase_ = tuple[float, float, float] def lowerCamelCase__ ( A__ : Pointad , A__ : Pointad ): '''simple docstring''' __lowerCamelCase = end_pointa[0] - end_pointa[0] __lowerCamelCase = end_pointa[1] - end_pointa[1] __lowerCamelCase = end_pointa[2] - end_pointa[2] return (x, y, z) def lowerCamelCase__ ( A__ : Vectorad , A__ : Vectorad ): '''simple docstring''' __lowerCamelCase = ab[1] * ac[2] - ab[2] * ac[1] # *i __lowerCamelCase = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j __lowerCamelCase = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def lowerCamelCase__ ( A__ : Vectorad , A__ : int ): '''simple docstring''' return tuple(round(x , A__ ) for x in vector ) == (0, 0, 0) def lowerCamelCase__ ( A__ : Pointad , A__ : Pointad , A__ : Pointad , A__ : int = 10 ): '''simple docstring''' __lowerCamelCase = create_vector(A__ , A__ ) __lowerCamelCase = create_vector(A__ , A__ ) return is_zero_vector(get_ad_vectors_cross(A__ , A__ ) , A__ )
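# Illustrative sketch of the collinearity test above, assuming descriptive names
# (the masked sample obscures its identifiers). Three points are collinear exactly
# when the cross product of the vectors ab and ac is the zero vector.
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]

def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    # component-wise difference end_point2 - end_point1
    return (
        end_point2[0] - end_point1[0],
        end_point2[1] - end_point1[1],
        end_point2[2] - end_point1[2],
    )

def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]          # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1   # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]          # *k
    return (x, y, z)

def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    # round each component so tiny floating-point noise still counts as zero
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)

def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)

# (0,0,0), (1,1,1), (2,2,2) lie on one line; swapping in (1,0,0) breaks it
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 1, 1), (1, 0, 0))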
80
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : int = BartphoTokenizer UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[str] = True def lowerCAmelCase__ ( self: Tuple ): super().setUp() __lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] __lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowerCamelCase = {"""unk_token""": """<unk>"""} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ) with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(F'{token} {vocab_tokens[token]}\n' ) __lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): __lowerCamelCase = """This is a là test""" __lowerCamelCase = """This is a<unk><unk> test""" return input_text, output_text def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) __lowerCamelCase = """This is a là test""" __lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split() __lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
80
1
import re def lowerCamelCase__ ( A__ : str ): '''simple docstring''' __lowerCamelCase = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(A__ , A__ ) ) if __name__ == "__main__": UpperCAmelCase_ = '0094702343221' print(is_sri_lankan_phone_number(phone))
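# Reading of the pattern above (illustrative): it accepts an optional Sri Lankan
# prefix (0, 94, +94 or 0094), a mobile block 7x with x in {0,1,2,4,5,6,7,8},
# an optional '-' or space separator, then exactly seven more digits,
# e.g. 0094702343221 or 071-2345678.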
80
def lowerCamelCase__ ( A__ : dict ): '''simple docstring''' __lowerCamelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowerCamelCase = set() return any( node not in visited and depth_first_search(A__ , A__ , A__ , A__ ) for node in graph ) def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ): '''simple docstring''' visited.add(A__ ) rec_stk.add(A__ ) for node in graph[vertex]: if node not in visited: if depth_first_search(A__ , A__ , A__ , A__ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(A__ ) return False if __name__ == "__main__": from doctest import testmod testmod()
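# Illustrative sketch of the cycle check above, assuming descriptive names: DFS over
# a directed graph, reporting a cycle when an edge reaches a node that is still on
# the recursion stack.
def check_cycle(graph: dict) -> bool:
    visited: set = set()
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )

def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # back edge to an ancestor on the current DFS path: a cycle
            return True
    # the node leaves the recursion stack before the function returns
    rec_stk.remove(vertex)
    return False

# 0 -> 1 -> 2 -> 0 is a cycle; removing the back edge makes the graph acyclic
assert check_cycle({0: [1], 1: [2], 2: [0]})
assert not check_cycle({0: [1], 1: [2], 2: []})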
80
1
from __future__ import annotations def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ): '''simple docstring''' __lowerCamelCase = sorted(numsa + numsa ) __lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
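# Worked example: merging [1, 3] with [2, 4] gives [1, 2, 3, 4]; the length is even,
# so the median is (2 + 3) / 2 = 2.5. For an odd-length merge the single middle
# element is returned directly.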
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ): '''simple docstring''' __lowerCamelCase = sorted(numsa + numsa ) __lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
80
1
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = torch.nn.Linear(10 , 10 ) __lowerCamelCase = torch.optim.SGD(model.parameters() , 0.1 ) __lowerCamelCase = Accelerator() __lowerCamelCase = accelerator.prepare(UpperCamelCase_ ) try: pickle.loads(pickle.dumps(UpperCamelCase_ ) ) except Exception as e: self.fail(F'Accelerated optimizer pickling failed with {e}' ) AcceleratorState._reset_state()
80
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __lowerCamelCase = get_activation("""gelu""" ) self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) ) self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __lowerCamelCase = get_activation("""gelu""" ) __lowerCamelCase = get_activation("""gelu_10""" ) __lowerCamelCase = torch_builtin(UpperCamelCase_ ) __lowerCamelCase = geluaa(UpperCamelCase_ ) __lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCAmelCase__ ( self: str ): get_activation("""gelu""" ) get_activation("""gelu_10""" ) get_activation("""gelu_fast""" ) get_activation("""gelu_new""" ) get_activation("""gelu_python""" ) get_activation("""gelu_pytorch_tanh""" ) get_activation("""linear""" ) get_activation("""mish""" ) get_activation("""quick_gelu""" ) get_activation("""relu""" ) get_activation("""sigmoid""" ) get_activation("""silu""" ) get_activation("""swish""" ) get_activation("""tanh""" ) with self.assertRaises(UpperCamelCase_ ): get_activation("""bogus""" ) with self.assertRaises(UpperCamelCase_ ): get_activation(UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = get_activation("""gelu""" ) __lowerCamelCase = 1 __lowerCamelCase = get_activation("""gelu""" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = acta.a
80
1
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class lowerCamelCase__( __lowerCamelCase): def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: float ): return 0.0 def lowerCamelCase__ ( A__ : np.ndarray , A__ : int ): '''simple docstring''' __lowerCamelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) __lowerCamelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowerCamelCase__ ( A__ : FilterType , A__ : int ): '''simple docstring''' __lowerCamelCase = 512 __lowerCamelCase = [1] + [0] * (size - 1) __lowerCamelCase = [filter_type.process(A__ ) for item in inputs] __lowerCamelCase = [0] * (samplerate - size) # zero-padding outputs += filler __lowerCamelCase = np.abs(np.fft.fft(A__ ) ) __lowerCamelCase = 20 * np.logaa(A__ ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) # Display within reasonable bounds __lowerCamelCase = get_bounds(A__ , A__ ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel("""Gain (dB)""" ) plt.plot(A__ ) plt.show() def lowerCamelCase__ ( A__ : FilterType , A__ : int ): '''simple docstring''' __lowerCamelCase = 512 __lowerCamelCase = [1] + [0] * (size - 1) __lowerCamelCase = [filter_type.process(A__ ) for item in inputs] __lowerCamelCase = [0] * (samplerate - size) # zero-padding outputs += filler __lowerCamelCase = np.angle(np.fft.fft(A__ ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("""Frequency (Hz)""" ) plt.xscale("""log""" ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel("""Phase shift (Radians)""" ) plt.plot(np.unwrap(A__ , -2 * pi ) ) plt.show()
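# Illustrative usage sketch (names assumed; the sample above masks them): the two
# plotting helpers only require an object exposing process(sample) -> float. A
# pass-through filter is the simplest such object and plots as a flat 0 dB
# magnitude line with zero phase shift.
class IdentityFilter:
    def process(self, sample: float) -> float:
        # no filtering: output equals input
        return sample

# show_frequency_response(IdentityFilter(), 48000)  # assumed helper names
# show_phase_response(IdentityFilter(), 48000)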
80
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowerCamelCase__( __lowerCamelCase): @slow @require_torch def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) __lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 1_28 __lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) __lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 ) __lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] __lowerCamelCase = outputs.attention_mask assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids ) assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCamelCase_: int ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , 
logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , ) # start training trainer.train()
80
1
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP UpperCAmelCase_ = False try: UpperCAmelCase_ = _is_package_available('google.colab') except ModuleNotFoundError: pass @input.register class lowerCamelCase__: def __init__( self: str , UpperCamelCase_: str = None , UpperCamelCase_: list = [] ): __lowerCamelCase = 0 __lowerCamelCase = choices __lowerCamelCase = prompt if sys.platform == "win32": __lowerCamelCase = """*""" else: __lowerCamelCase = """➔ """ def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: str = "" ): if sys.platform != "win32": writeColor(self.choices[index] , 32 , UpperCamelCase_ ) else: forceWrite(self.choices[index] , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: int ): if index == self.position: forceWrite(F' {self.arrow_char} ' ) self.write_choice(UpperCamelCase_ ) else: forceWrite(F' {self.choices[index]}' ) reset_cursor() def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Direction , UpperCamelCase_: int = 1 ): __lowerCamelCase = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(UpperCamelCase_ ) move_cursor(UpperCamelCase_ , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["""up"""] ) def lowerCAmelCase__ ( self: Dict ): self.move_direction(Direction.UP ) @input.mark(KEYMAP["""down"""] ) def lowerCAmelCase__ ( self: str ): self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["""newline"""] ) def lowerCAmelCase__ ( self: int ): move_cursor(len(self.choices ) - self.position , """DOWN""" ) return self.position @input.mark(KEYMAP["""interrupt"""] ) def lowerCAmelCase__ ( self: Union[str, Any] ): move_cursor(len(self.choices ) - self.position , """DOWN""" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(UpperCamelCase_ )] for number in range(10 )] ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = int(chr(self.current_selection ) ) __lowerCamelCase = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , UpperCamelCase_ ) else: return else: return def lowerCAmelCase__ ( self: int , UpperCamelCase_: int = 0 ): if self.prompt: linebreak() forceWrite(self.prompt , """\n""" ) if in_colab: forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" ) else: forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" ) __lowerCamelCase = default_choice for i in range(len(self.choices ) ): self.print_choice(UpperCamelCase_ ) forceWrite("""\n""" ) move_cursor(len(self.choices ) - self.position , """UP""" ) with cursor.hide(): while True: if in_colab: try: __lowerCamelCase = int(builtins.input() ) except ValueError: __lowerCamelCase = default_choice else: __lowerCamelCase = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , """UP""" ) clear_line() self.write_choice(UpperCamelCase_ , """\n""" ) return choice
80
class lowerCamelCase__: # Public class to implement a graph def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): __lowerCamelCase = row __lowerCamelCase = col __lowerCamelCase = graph def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): # Checking all 8 elements surrounding nth element __lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1] __lowerCamelCase = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands. __lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )] __lowerCamelCase = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += 1 return count
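# Illustrative sketch of the island counter above, assuming descriptive names: DFS
# floods each 8-connected group of 1-cells exactly once, so the final method returns
# the number of connected land regions in the grid.
class IslandGraph:
    def __init__(self, row: int, col: int, graph: list[list[int]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # inside the grid, not yet visited, and a land cell
        return 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and bool(self.graph[i][j])

    def flood(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # visit all 8 neighbours of cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.flood(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:
        visited = [[False for _ in range(self.COL)] for _ in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.flood(i, j, visited)
                    count += 1
        return count

# the two 1-cells in the top row touch; the bottom-right cell is its own island
assert IslandGraph(3, 3, [[1, 1, 0], [0, 0, 0], [0, 0, 1]]).count_islands() == 2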
80
1
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase_ = 256 class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : str = ['melgan'] def __init__( self: Tuple , UpperCamelCase_: SpectrogramNotesEncoder , UpperCamelCase_: SpectrogramContEncoder , UpperCamelCase_: TaFilmDecoder , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: OnnxRuntimeModel if is_onnx_available() else Any , ): super().__init__() # From MELGAN __lowerCamelCase = math.log(1E-5 ) # Matches MelGAN training. __lowerCamelCase = 4.0 # Largest value for most examples __lowerCamelCase = 1_28 self.register_modules( notes_encoder=UpperCamelCase_ , continuous_encoder=UpperCamelCase_ , decoder=UpperCamelCase_ , scheduler=UpperCamelCase_ , melgan=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: Any=(-1.0, 1.0) , UpperCamelCase_: Optional[int]=False ): __lowerCamelCase, __lowerCamelCase = output_range if clip: __lowerCamelCase = torch.clip(UpperCamelCase_ , self.min_value , self.max_value ) # Scale to [0, 1]. __lowerCamelCase = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: Any=(-1.0, 1.0) , UpperCamelCase_: Optional[Any]=False ): __lowerCamelCase, __lowerCamelCase = input_range __lowerCamelCase = torch.clip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if clip else outputs # Scale to [0, 1]. __lowerCamelCase = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def lowerCAmelCase__ ( self: str , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Dict ): __lowerCamelCase = input_tokens > 0 __lowerCamelCase, __lowerCamelCase = self.notes_encoder( encoder_input_tokens=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase = self.continuous_encoder( encoder_inputs=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = noise_time if not torch.is_tensor(UpperCamelCase_ ): __lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0: __lowerCamelCase = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowerCamelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) __lowerCamelCase = self.decoder( encodings_and_masks=UpperCamelCase_ , decoder_input_tokens=UpperCamelCase_ , decoder_noise_time=UpperCamelCase_ ) return logits @torch.no_grad() def __call__( self: Optional[int] , UpperCamelCase_: List[List[int]] , UpperCamelCase_: Optional[torch.Generator] = None , UpperCamelCase_: int = 1_00 , UpperCamelCase_: bool = True , UpperCamelCase_: str = "numpy" , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(UpperCamelCase_ )}.' ) __lowerCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) __lowerCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa ) __lowerCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device ) for i, encoder_input_tokens in enumerate(UpperCamelCase_ ): if i == 0: __lowerCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. __lowerCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
__lowerCamelCase = ones __lowerCamelCase = self.scale_features( UpperCamelCase_ , output_range=[-1.0, 1.0] , clip=UpperCamelCase_ ) __lowerCamelCase = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase_ , continuous_mask=UpperCamelCase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop __lowerCamelCase = randn_tensor( shape=encoder_continuous_inputs.shape , generator=UpperCamelCase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __lowerCamelCase = self.decode( encodings_and_masks=UpperCamelCase_ , input_tokens=UpperCamelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 __lowerCamelCase = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample __lowerCamelCase = self.scale_to_features(UpperCamelCase_ , input_range=[-1.0, 1.0] ) __lowerCamelCase = mel[:1] __lowerCamelCase = mel.cpu().float().numpy() __lowerCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase_ , UpperCamelCase_ ) logger.info("""Generated segment""" , UpperCamelCase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( """Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" ) elif output_type == "numpy" and self.melgan is None: raise ValueError( """Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" ) if output_type == "numpy": __lowerCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: __lowerCamelCase = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=UpperCamelCase_ )
80
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase__ ( A__ : str ): '''simple docstring''' __lowerCamelCase = DPTConfig() if "large" in checkpoint_url: __lowerCamelCase = 1024 __lowerCamelCase = 4096 __lowerCamelCase = 24 __lowerCamelCase = 16 __lowerCamelCase = [5, 11, 17, 23] __lowerCamelCase = [256, 512, 1024, 1024] __lowerCamelCase = (1, 384, 384) if "ade" in checkpoint_url: __lowerCamelCase = True __lowerCamelCase = 150 __lowerCamelCase = """huggingface/label-files""" __lowerCamelCase = """ade20k-id2label.json""" __lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) ) __lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = [1, 150, 480, 480] return config, expected_shape def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' __lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): __lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: __lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: __lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" ) if "pos_embed" in name: __lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: __lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: __lowerCamelCase = name.replace("""proj""" , """projection""" ) if "blocks" in name: __lowerCamelCase = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: __lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name: __lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: __lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: __lowerCamelCase = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: __lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: __lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: __lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: __lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: __lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 __lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: __lowerCamelCase = name.replace("""out_conv""" , 
"""projection""" ) if "resConfUnit1" in name: __lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: __lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: __lowerCamelCase = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: __lowerCamelCase = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: __lowerCamelCase = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: __lowerCamelCase = name.replace("""bn""" , """batch_norm""" ) if "head" in name: __lowerCamelCase = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: __lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: __lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" ) return name def lowerCamelCase__ ( A__ : Tuple , A__ : Any ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) __lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase = in_proj_weight[: config.hidden_size, :] __lowerCamelCase = in_proj_bias[: config.hidden_size] __lowerCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase = in_proj_weight[ -config.hidden_size :, : ] __lowerCamelCase 
= in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ ) # load original state_dict from URL __lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(A__ ) # rename keys for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(A__ ) __lowerCamelCase = val # read in qkv matrices read_in_q_k_v(A__ , A__ ) # load HuggingFace model __lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ ) model.load_state_dict(A__ ) model.eval() # Check outputs on an image __lowerCamelCase = 480 if """ade""" in checkpoint_url else 384 __lowerCamelCase = DPTImageProcessor(size=A__ ) __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(A__ , return_tensors="""pt""" ) # forward pass __lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth # Assert logits __lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] ) if "ade" in checkpoint_url: __lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] ) assert outputs.shape == torch.Size(A__ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , A__ ) ) Path(A__ ).mkdir(exist_ok=A__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(A__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(A__ ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , ) image_processor.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) UpperCAmelCase_ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
80
1
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=13 , UpperCamelCase_: Union[str, Any]=7 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: Tuple=False , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: int=99 , UpperCamelCase_: Dict=32 , UpperCamelCase_: Dict=5 , UpperCamelCase_: Union[str, Any]=4 , UpperCamelCase_: str=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: Tuple=5_12 , UpperCamelCase_: List[Any]=16 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: str=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCAmelCase__ ( self: int ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self: int ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any , UpperCamelCase_: Tuple ): __lowerCamelCase = 
DistilBertModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] ): __lowerCamelCase = DistilBertForMaskedLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str ): __lowerCamelCase = DistilBertForQuestionAnswering(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[int] ): __lowerCamelCase = self.num_labels __lowerCamelCase = DistilBertForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = DistilBertForTokenClassification(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] ): __lowerCamelCase = self.num_choices __lowerCamelCase = DistilBertForMultipleChoice(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self: int ): __lowerCamelCase = self.prepare_config_and_inputs() ((__lowerCamelCase), (__lowerCamelCase), (__lowerCamelCase), (__lowerCamelCase), (__lowerCamelCase), (__lowerCamelCase)) = 
config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : str = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) UpperCAmelCase__ : Optional[Any] = ( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[Any] = True UpperCAmelCase__ : Dict = True UpperCAmelCase__ : Dict = True UpperCAmelCase__ : List[str] = True def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = DistilBertModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , dim=37 ) def lowerCAmelCase__ ( self: Optional[int] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: int ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self: int ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = DistilBertModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @slow @require_torch_gpu def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __lowerCamelCase = True __lowerCamelCase = model_class(config=UpperCamelCase_ ) __lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = torch.jit.trace( UpperCamelCase_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , """traced_model.pt""" ) ) __lowerCamelCase = torch.jit.load(os.path.join(UpperCamelCase_ , """traced_model.pt""" ) , map_location=UpperCamelCase_ ) loaded(inputs_dict["""input_ids"""].to(UpperCamelCase_ ) , inputs_dict["""attention_mask"""].to(UpperCamelCase_ ) ) @require_torch class lowerCamelCase__( unittest.TestCase): @slow def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" ) __lowerCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __lowerCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0] __lowerCamelCase = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , UpperCamelCase_ ) __lowerCamelCase = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1E-4 ) )
80
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
80
1
from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCAmelCase_ = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
80
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = 'bert' def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True 
, UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ): super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout class lowerCamelCase__( __lowerCamelCase): @property def lowerCAmelCase__ ( self: Any ): if self.task == "multiple-choice": __lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __lowerCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
80
1
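A minimal usage sketch for the configuration record above, using the published transformers.BertConfig API that this record mirrors; the values shown are the published defaults and are illustrative only.

# Sketch: building a small BERT configuration and reading it back.
# Assumes an installed `transformers`; values are the documented defaults.
from transformers import BertConfig

config = BertConfig(
    vocab_size=30522,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
)
print(config.hidden_size)               # 768
print(config.position_embedding_type)   # "absolute" by default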
class lowerCamelCase__: # Public class to implement a graph
    def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
        __lowerCamelCase = row
        __lowerCamelCase = col
        __lowerCamelCase = graph

    def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
        # Visit all 8 cells surrounding cell (i, j)
        __lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1]  # row offsets of the 8 neighbours
        __lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]  # column offsets of the 8 neighbours
        __lowerCamelCase = True  # Mark this cell visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )

    def lowerCAmelCase__ ( self: List[Any] ):  # And finally, count all islands.
        __lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
        __lowerCamelCase = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
                    count += 1
        return count
80
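The record above obscures the usual names behind the dataset's mangling; a minimal de-mangled sketch of the same 8-neighbour flood-fill island count, where Graph, is_safe, diffs and count_islands are assumed conventional names, not names from the source.

# De-mangled sketch of the island-counting routine above (assumed names).
class Graph:
    def __init__(self, row: int, col: int, graph: list[list[int]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # In-bounds, not yet visited, and land (non-zero).
        return 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and bool(self.graph[i][j])

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # 8 surrounding cells
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:
        visited = [[False] * self.COL for _ in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count

grid = [[1, 0, 0], [0, 0, 0], [0, 0, 1]]
print(Graph(3, 3, grid).count_islands())  # 2: the two corner cells are not 8-adjacent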
from __future__ import annotations

from math import ceil, floor, sqrt


def lowerCamelCase__ ( A__ : int = 2000000 ):
    '''simple docstring'''
    __lowerCamelCase = [0]
    __lowerCamelCase = 42
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    __lowerCamelCase = 0
    # the area corresponding to the grid that gives the product closest to target
    __lowerCamelCase = 0
    # an estimate of b, using the quadratic formula
    __lowerCamelCase = 42
    # the largest integer less than b_estimate
    __lowerCamelCase = 42
    # the smallest integer greater than b_estimate
    __lowerCamelCase = 42
    # the triangle number corresponding to b_floor
    __lowerCamelCase = 42
    # the triangle number corresponding to b_ceil
    __lowerCamelCase = 42
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        __lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        __lowerCamelCase = floor(A__ )
        __lowerCamelCase = ceil(A__ )
        __lowerCamelCase = triangle_numbers[b_floor]
        __lowerCamelCase = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ):
            __lowerCamelCase = triangle_b_first_guess * triangle_a
            __lowerCamelCase = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ):
            __lowerCamelCase = triangle_b_second_guess * triangle_a
            __lowerCamelCase = idx_a * b_ceil
    return area


if __name__ == "__main__":
    print(f"""{solution() = }""")
80
1
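A short worked check of the idea behind the record above: the number of axis-aligned rectangles inside an a×b grid is T(a)·T(b), where T(k) = k(k+1)/2 counts the placements of each side, so the search looks for the product nearest the target.

# Worked check: rectangle counts in an a x b grid are T(a) * T(b).
def triangle(k: int) -> int:
    return k * (k + 1) // 2

# A 36 x 77 grid gives 666 * 3003 = 1_999_998, only 2 away from the 2_000_000 target.
assert triangle(36) * triangle(77) == 1_999_998
print(36 * 77)  # area of the near-optimal grid: 2772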
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = 'Hello world! cécé herlolip' def lowerCamelCase__ ( A__ : str , A__ : str , A__ : bool ): '''simple docstring''' __lowerCamelCase = FairseqRobertaModel.from_pretrained(A__ ) roberta.eval() # disable dropout __lowerCamelCase = roberta.model.encoder.sentence_encoder __lowerCamelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: __lowerCamelCase = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" , A__ ) __lowerCamelCase = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ ) model.eval() # Now let's copy all the weights. # Embeddings __lowerCamelCase = roberta_sent_encoder.embed_tokens.weight __lowerCamelCase = roberta_sent_encoder.embed_positions.weight __lowerCamelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
__lowerCamelCase = roberta_sent_encoder.layer_norm.weight __lowerCamelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __lowerCamelCase = model.roberta.encoder.layer[i] __lowerCamelCase = roberta_sent_encoder.layers[i] __lowerCamelCase = layer.attention __lowerCamelCase = roberta_layer.self_attn_layer_norm.weight __lowerCamelCase = roberta_layer.self_attn_layer_norm.bias # self attention __lowerCamelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) __lowerCamelCase = roberta_layer.self_attn.q_proj.weight __lowerCamelCase = roberta_layer.self_attn.q_proj.bias __lowerCamelCase = roberta_layer.self_attn.k_proj.weight __lowerCamelCase = roberta_layer.self_attn.k_proj.bias __lowerCamelCase = roberta_layer.self_attn.v_proj.weight __lowerCamelCase = roberta_layer.self_attn.v_proj.bias # self-attention output __lowerCamelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape __lowerCamelCase = roberta_layer.self_attn.out_proj.weight __lowerCamelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm __lowerCamelCase = roberta_layer.final_layer_norm.weight __lowerCamelCase = roberta_layer.final_layer_norm.bias # intermediate __lowerCamelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape __lowerCamelCase = roberta_layer.fca.weight __lowerCamelCase = roberta_layer.fca.bias # output __lowerCamelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape __lowerCamelCase = roberta_layer.fca.weight __lowerCamelCase = roberta_layer.fca.bias # end of layer if classification_head: __lowerCamelCase = roberta.model.classification_heads["""mnli"""].dense.weight __lowerCamelCase = roberta.model.classification_heads["""mnli"""].dense.bias __lowerCamelCase = roberta.model.classification_heads["""mnli"""].out_proj.weight __lowerCamelCase = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head __lowerCamelCase = roberta.model.encoder.lm_head.dense.weight __lowerCamelCase = roberta.model.encoder.lm_head.dense.bias __lowerCamelCase = roberta.model.encoder.lm_head.layer_norm.weight __lowerCamelCase = roberta.model.encoder.lm_head.layer_norm.bias __lowerCamelCase = roberta.model.encoder.lm_head.weight __lowerCamelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
__lowerCamelCase = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1 __lowerCamelCase = model(A__ )[0] if classification_head: __lowerCamelCase = roberta.model.classification_heads["""mnli"""](roberta.extract_features(A__ ) ) else: __lowerCamelCase = roberta.model(A__ )[0] print(our_output.shape , their_output.shape ) __lowerCamelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7 __lowerCamelCase = torch.allclose(A__ , A__ , atol=1E-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) UpperCAmelCase_ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
80
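The converter above ends with an output-parity check between the original and converted models; a standalone sketch of that pattern, with synthetic tensors standing in for the two models' outputs.

import torch

# Sketch of the max-absolute-difference / allclose parity check used above.
a = torch.randn(1, 5, 8)
b = a + 1e-5 * torch.randn_like(a)
max_absolute_diff = torch.max(torch.abs(a - b)).item()
print(f"max_absolute_diff = {max_absolute_diff}")
print(torch.allclose(a, b, atol=1e-3))  # True for these nearly identical tensors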
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet, attn in zip(self.resnets , self.attentions ): __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet in self.resnets: __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def 
lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ): for resnet in self.resnets: # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: int ): # there is always at least one resnet __lowerCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , 
out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __lowerCamelCase = [] for _ in range(self.num_layers ): __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ): __lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) return hidden_states
80
1
def lowerCamelCase__ ( A__ : str ): '''simple docstring''' if not all(x.isalpha() for x in string ): raise ValueError("""String must only contain alphabetic characters.""" ) __lowerCamelCase = sorted(string.lower() ) return len(A__ ) == len(set(A__ ) ) if __name__ == "__main__": UpperCAmelCase_ = input('Enter a string ').strip() UpperCAmelCase_ = is_isogram(input_str) print(f"""{input_str} is {"an" if isogram else "not an"} isogram.""")
80
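A few quick checks for the isogram test above; the function is defined under a mangled name but invoked as is_isogram in the __main__ block, so this sketch assumes that name.

# Sketch, assuming the de-mangled name `is_isogram` used in the __main__ block.
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    chars = sorted(string.lower())
    return len(chars) == len(set(chars))

assert is_isogram("Uncopyrightable")   # 15 distinct letters, a classic isogram
assert not is_isogram("letter")        # repeated 't' and 'e'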
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = ' Hello world! cécé herlolip' UpperCAmelCase_ = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = dct.pop(A__ ) __lowerCamelCase = val def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' __lowerCamelCase = torch.load(A__ , map_location="""cpu""" ) __lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval() hub_interface.model.load_state_dict(sd["""model"""] ) return hub_interface def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ ) __lowerCamelCase = emb.weight.data return lin_layer @torch.no_grad() def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ): '''simple docstring''' if not os.path.exists(A__ ): __lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval() else: __lowerCamelCase = load_xsum_checkpoint(A__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: __lowerCamelCase = checkpoint_path.replace(""".""" , """-""" ) __lowerCamelCase = BartConfig.from_pretrained(A__ ) __lowerCamelCase = bart.encode(A__ ).unsqueeze(0 ) __lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 ) if not torch.eq(A__ , A__ ).all(): raise ValueError( f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' ) if checkpoint_path == "bart.large.mnli": __lowerCamelCase = bart.state_dict() remove_ignore_keys_(A__ ) __lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""] for src, dest in mnli_rename_keys: rename_key(A__ , A__ , A__ ) __lowerCamelCase = BartForSequenceClassification(A__ ).eval() model.load_state_dict(A__ ) __lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ ) __lowerCamelCase = model(A__ )[0] # logits else: # no classification heads to worry about __lowerCamelCase = bart.model.state_dict() remove_ignore_keys_(A__ ) __lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""] __lowerCamelCase = bart.extract_features(A__ ) if hf_checkpoint_name == "facebook/bart-large": __lowerCamelCase = BartModel(A__ 
).eval() model.load_state_dict(A__ ) __lowerCamelCase = model(A__ ).model[0] else: __lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt model.model.load_state_dict(A__ ) if hasattr(A__ , """lm_head""" ): __lowerCamelCase = make_linear_from_emb(model.model.shared ) __lowerCamelCase = model.model(A__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) UpperCAmelCase_ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
80
1
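The make_linear_from_emb helper in the converter above turns an embedding matrix into a bias-free linear layer that shares its weights; a standalone sketch of that weight-tying trick with illustrative sizes.

import torch
from torch import nn

# Sketch of the embedding-to-linear weight tying used in the converter above.
emb = nn.Embedding(100, 16)
vocab_size, emb_size = emb.weight.shape
lin = nn.Linear(emb_size, vocab_size, bias=False)
lin.weight.data = emb.weight.data          # share the same parameters
x = torch.randn(2, 16)
print(lin(x).shape)                        # torch.Size([2, 100])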
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class lowerCamelCase__( pl.LightningModule): def __init__( self: List[Any] , UpperCamelCase_: Union[str, Any] ): super().__init__() __lowerCamelCase = model __lowerCamelCase = 2 __lowerCamelCase = nn.Linear(self.model.config.hidden_size , self.num_labels ) def lowerCAmelCase__ ( self: Tuple ): pass def lowerCamelCase__ ( A__ : str , A__ : str , A__ : str ): '''simple docstring''' __lowerCamelCase = LongformerModel.from_pretrained(A__ ) __lowerCamelCase = LightningModel(A__ ) __lowerCamelCase = torch.load(A__ , map_location=torch.device("""cpu""" ) ) lightning_model.load_state_dict(ckpt["""state_dict"""] ) # init longformer question answering model __lowerCamelCase = LongformerForQuestionAnswering.from_pretrained(A__ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(A__ ) print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase_ = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
80
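The conversion above is essentially checkpoint surgery: state dicts are copied module by module into a freshly constructed model. A tiny self-contained sketch of that pattern.

import torch
from torch import nn

# Sketch of the state_dict transfer pattern used by the converter above.
src = nn.Linear(4, 2)
dst = nn.Linear(4, 2)
dst.load_state_dict(src.state_dict())
assert torch.equal(src.weight, dst.weight)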
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class lowerCamelCase__: def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_input_mask __lowerCamelCase = use_labels __lowerCamelCase = use_mc_token_ids __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = self.vocab_size - 1 def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None if self.use_mc_token_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() __lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase__ ( self: Dict ): return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def 
lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ): __lowerCamelCase = CTRLModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ ) model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ): __lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__ : int = ( { 'feature-extraction': CTRLModel, 'text-classification': CTRLForSequenceClassification, 'text-generation': CTRLLMHeadModel, 'zero-shot': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = CTRLModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def lowerCAmelCase__ ( self: Optional[int] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase__ ( self: List[Any] ): pass @slow def lowerCAmelCase__ ( self: Optional[Any] ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self: Optional[Any] ): pass @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(UpperCamelCase_ ) __lowerCamelCase = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is __lowerCamelCase = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
80
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['ConvNextFeatureExtractor'] UpperCAmelCase_ = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
80
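A short sketch of what the lazy import structure above buys: the heavy modeling submodules are imported on first attribute access, not at package import time. Assumes an installed transformers; the import path mirrors the record.

# Sketch: this resolves through the lazy import structure defined above.
from transformers.models.convnext import ConvNextConfig

config = ConvNextConfig()
print(type(config).__name__)  # ConvNextConfig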
def lowerCamelCase__ ( A__ : int = 2000000 ):
    '''simple docstring'''
    __lowerCamelCase = [0 for i in range(n + 1 )]
    __lowerCamelCase = 1
    __lowerCamelCase = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):  # mark every multiple of the prime i
                __lowerCamelCase = 1
    __lowerCamelCase = 0
    for i in range(A__ ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"""{solution() = }""")
80
1
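A compact, runnable restatement of the sieve above (the record's assignment targets are mangled); the sum of primes below 10 is 2 + 3 + 5 + 7 = 17, which makes a handy sanity check.

# De-mangled sketch of the sieve-of-Eratosthenes prime summation above.
def solution(n: int = 2_000_000) -> int:
    composite = [0] * (n + 1)
    composite[0] = composite[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if composite[i] == 0:
            for j in range(i * i, n + 1, i):
                composite[j] = 1
    return sum(i for i in range(2, n) if composite[i] == 0)

assert solution(10) == 17   # 2 + 3 + 5 + 7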
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) UpperCAmelCase_ = logging.getLogger() UpperCAmelCase_ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowerCamelCase__( __lowerCamelCase): def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict ): os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) __lowerCamelCase = {"""source""": """What is love ?""", """target""": """life"""} __lowerCamelCase = {"""train""": 12, """val""": 2, """test""": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: __lowerCamelCase = """\n""".join([contents[field]] * n_lines[split] ) with open(os.path.join(UpperCamelCase_ , F'{split}.{field}' ) , """w""" ) as f: f.write(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: str = "pytorch" ): __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = os.path.join(UpperCamelCase_ , """output""" ) __lowerCamelCase = os.path.join(UpperCamelCase_ , """data""" ) self._create_dummy_data(data_dir=UpperCamelCase_ ) __lowerCamelCase = F'\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n '.split() if gpus > 0: testargs.append(F'--gpus={gpus}' ) if is_apex_available(): testargs.append("""--fp16""" ) else: testargs.append("""--gpus=0""" ) testargs.append("""--distributed_backend=ddp_cpu""" ) testargs.append("""--num_processes=2""" ) __lowerCamelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(UpperCamelCase_ , env=self.get_env() ) __lowerCamelCase = os.path.join(UpperCamelCase_ , """metrics.json""" ) with open(UpperCamelCase_ ) as f: __lowerCamelCase = json.load(UpperCamelCase_ ) return result @require_torch_gpu def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) @require_torch_multi_gpu def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) @require_torch_gpu @require_ray def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self._run_finetune(gpus=1 , distributed_retriever="""ray""" ) self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 ) @require_torch_multi_gpu @require_ray def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self._run_finetune(gpus=1 , distributed_retriever="""ray""" ) self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
80
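The test above first materialises a tiny source/target dataset on disk; a standalone sketch of that dummy-data layout, using a temporary directory instead of the test's managed one.

import os
import tempfile

# Sketch of the dummy-dataset layout the test above writes out.
data_dir = tempfile.mkdtemp()
for split, n_lines in {"train": 12, "val": 2, "test": 2}.items():
    for field, text in {"source": "What is love ?", "target": "life"}.items():
        with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
            f.write("\n".join([text] * n_lines))
print(sorted(os.listdir(data_dir)))  # train/val/test x source/target files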
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : Dict = 1 @register_to_config def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ): # set `betas`, `alphas`, `timesteps` self.set_timesteps(UpperCamelCase_ ) # standard deviation of the initial noise distribution __lowerCamelCase = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. __lowerCamelCase = 4 # running values __lowerCamelCase = [] def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ): __lowerCamelCase = num_inference_steps __lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] __lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: __lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: __lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2 __lowerCamelCase = (1.0 - self.betas**2) ** 0.5 __lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] __lowerCamelCase = timesteps.to(UpperCamelCase_ ) __lowerCamelCase = [] def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ): if self.num_inference_steps is None: raise ValueError( """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" ) __lowerCamelCase = (self.timesteps == timestep).nonzero().item() __lowerCamelCase = timestep_index + 1 __lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(UpperCamelCase_ ) if len(self.ets ) == 1: __lowerCamelCase = self.ets[-1] elif len(self.ets ) == 2: __lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: __lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: __lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) __lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ): return sample def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ): __lowerCamelCase = self.alphas[timestep_index] __lowerCamelCase = self.betas[timestep_index] __lowerCamelCase = self.alphas[prev_timestep_index] __lowerCamelCase = self.betas[prev_timestep_index] __lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 ) __lowerCamelCase = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[Any] ): return self.config.num_train_timesteps
80
1
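The step rule in the scheduler above blends the stored derivative history with fourth-order Adams-Bashforth coefficients; a tiny numeric sketch of just that combination, lifted from the branches in the step method.

# Sketch of the 4-step Adams-Bashforth combination used by the scheduler above.
def ab_combine(ets: list[float]) -> float:
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

print(ab_combine([1.0, 1.0, 1.0, 1.0]))  # a constant history stays 1.0 (55-59+37-9 = 24)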
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : List[str] = SpeechTaTokenizer UpperCAmelCase__ : str = False UpperCAmelCase__ : int = True def lowerCAmelCase__ ( self: str ): super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = SpeechTaTokenizer(UpperCamelCase_ ) __lowerCamelCase = AddedToken("""<mask>""" , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ): __lowerCamelCase = """this is a test""" __lowerCamelCase = """this is a test""" return input_text, output_text def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: Dict=20 , UpperCamelCase_: List[str]=5 ): __lowerCamelCase, __lowerCamelCase = self.get_input_output_texts(UpperCamelCase_ ) __lowerCamelCase = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) __lowerCamelCase = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) return text, ids def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = """<pad>""" __lowerCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-4] , """œ""" ) self.assertEqual(vocab_keys[-2] , """<mask>""" ) self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" ) self.assertEqual(len(UpperCamelCase_ ) , 81 ) def lowerCAmelCase__ ( self: Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.get_tokenizers(do_lower_case=UpperCamelCase_ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(UpperCamelCase_ ) self.assertNotEqual(UpperCamelCase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __lowerCamelCase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""] __lowerCamelCase = tokenizer.add_tokens(UpperCamelCase_ ) __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(UpperCamelCase_ ) self.assertNotEqual(UpperCamelCase_ , 0 ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) ) self.assertEqual(UpperCamelCase_ , all_size + len(UpperCamelCase_ ) ) __lowerCamelCase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCamelCase_ ) 
self.assertGreaterEqual(len(UpperCamelCase_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) __lowerCamelCase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""} __lowerCamelCase = tokenizer.add_special_tokens(UpperCamelCase_ ) __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(UpperCamelCase_ ) self.assertNotEqual(UpperCamelCase_ , 0 ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) ) self.assertEqual(UpperCamelCase_ , all_size_a + len(UpperCamelCase_ ) ) __lowerCamelCase = tokenizer.encode( """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCamelCase_ ) self.assertGreaterEqual(len(UpperCamelCase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCAmelCase__ ( self: Union[str, Any] ): pass def lowerCAmelCase__ ( self: int ): pass def lowerCAmelCase__ ( self: int ): __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = tokenizer.tokenize("""This is a test""" ) # fmt: off self.assertListEqual(UpperCamelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) __lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) # fmt: off self.assertListEqual(UpperCamelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on __lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] ) @slow def lowerCAmelCase__ ( self: Tuple ): # Use custom sequence because this tokenizer does not handle numbers. __lowerCamelCase = [ """Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """ """general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural """ """Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """ """models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""", """BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """ """conditioning on both left and right context in all layers.""", """The quick brown fox jumps over the lazy dog.""", ] # fmt: off __lowerCamelCase = { """input_ids""": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], """attention_mask""": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCamelCase_ , )
80
import os from collections.abc import Iterator def lowerCamelCase__ ( A__ : str = "." ): '''simple docstring''' for dir_path, dir_names, filenames in os.walk(A__ ): __lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(A__ )[1] in (".py", ".ipynb"): yield os.path.join(A__ , A__ ).lstrip("""./""" ) def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' return f'{i * " "}*' if i else "\n##" def lowerCamelCase__ ( A__ : str , A__ : str ): '''simple docstring''' __lowerCamelCase = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part: print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' ) return new_path def lowerCamelCase__ ( A__ : str = "." ): '''simple docstring''' __lowerCamelCase = """""" for filepath in sorted(good_file_paths(A__ ) ): __lowerCamelCase, __lowerCamelCase = os.path.split(A__ ) if filepath != old_path: __lowerCamelCase = print_path(A__ , A__ ) __lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0 __lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" ) __lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0] print(f'{md_prefix(A__ )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('.')
80
1
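A quick illustrative check of the md_prefix helper in the directory-listing script above (a minimal sketch; the one-space-per-level indent follows the flattened source, though markdown nesting conventionally uses two spaces per level):

def md_prefix(i: int) -> str:
    # "\n##" starts a new section heading; otherwise an indented bullet marker.
    return f"{i * ' '}*" if i else "\n##"

assert md_prefix(0) == "\n##"
assert md_prefix(2) == "  *"  # bullet nested two levels deep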
from __future__ import annotations from math import ceil, floor, sqrt def lowerCamelCase__ ( A__ : int = 2000000 ): '''simple docstring''' __lowerCamelCase = [0] __lowerCamelCase = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target __lowerCamelCase = 0 # the area corresponding to the grid that gives the product closest to target __lowerCamelCase = 0 # an estimate of b, using the quadratic formula __lowerCamelCase = 42 # the largest integer less than b_estimate __lowerCamelCase = 42 # the smallest integer greater than b_estimate __lowerCamelCase = 42 # the triangle number corresponding to b_floor __lowerCamelCase = 42 # the triangle number corresponding to b_ceil __lowerCamelCase = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): __lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 __lowerCamelCase = floor(A__ ) __lowerCamelCase = ceil(A__ ) __lowerCamelCase = triangle_numbers[b_floor] __lowerCamelCase = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): __lowerCamelCase = triangle_b_first_guess * triangle_a __lowerCamelCase = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): __lowerCamelCase = triangle_b_second_guess * triangle_a __lowerCamelCase = idx_a * b_ceil return area if __name__ == "__main__": print(f"""{solution() = }""")
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list ): '''simple docstring''' if not nums: raise ValueError("""List is empty""" ) return sum(A__ ) / len(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
80
1
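The grid search above rests on the identity that an a×b rectangular grid contains T(a)·T(b) sub-rectangles, where T(n) = n(n+1)/2 is the n-th triangle number; a small sanity check (illustrative only, not part of the sample):

def triangle(n: int) -> int:
    return n * (n + 1) // 2

# A 2x3 grid contains T(2) * T(3) = 3 * 6 = 18 rectangles,
# the same count the search compares against the target of two million.
assert triangle(2) * triangle(3) == 18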
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCamelCase__: def __init__( self: Any , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Dict=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Optional[int]=99 , UpperCamelCase_: Optional[Any]=32 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: int=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: str=16 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: List[Any]=0.02 , UpperCamelCase_: int=3 , UpperCamelCase_: Union[str, Any]=4 , UpperCamelCase_: Dict=None , UpperCamelCase_: Dict=10_00 , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = range_bbox def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowerCamelCase = bbox[i, j, 3] __lowerCamelCase = bbox[i, j, 1] __lowerCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: __lowerCamelCase = bbox[i, j, 2] __lowerCamelCase = bbox[i, j, 0] __lowerCamelCase = t __lowerCamelCase = tf.convert_to_tensor(UpperCamelCase_ ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = LayoutLMConfig( 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: List[Any] ): __lowerCamelCase = TFLayoutLMModel(config=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[str] ): __lowerCamelCase = TFLayoutLMForMaskedLM(config=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Any ): __lowerCamelCase = self.num_labels __lowerCamelCase = TFLayoutLMForSequenceClassification(config=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ): __lowerCamelCase = self.num_labels __lowerCamelCase = TFLayoutLMForTokenClassification(config=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = TFLayoutLMForQuestionAnswering(config=UpperCamelCase_ ) __lowerCamelCase = 
model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_tf class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : List[str] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) UpperCAmelCase__ : str = ( { 'feature-extraction': TFLayoutLMModel, 'fill-mask': TFLayoutLMForMaskedLM, 'text-classification': TFLayoutLMForSequenceClassification, 'token-classification': TFLayoutLMForTokenClassification, 'zero-shot': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : Any = 10 def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = TFLayoutLMModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 ) def lowerCAmelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self: Dict ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = TFLayoutLMModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip("""Onnx compliancy broke with TF 2.10""" ) def lowerCAmelCase__ ( self: Dict ): pass def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 __lowerCamelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 __lowerCamelCase = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 __lowerCamelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) __lowerCamelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCamelCase__( unittest.TestCase): @slow def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""" ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = prepare_layoutlm_batch_inputs() # forward pass __lowerCamelCase = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) # test the sequence output on [0, :3, :3] __lowerCamelCase = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-3 ) ) # test the pooled output on [1, :3] __lowerCamelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase_ , atol=1E-3 ) ) @slow def lowerCAmelCase__ ( self: Dict ): # initialize model with randomly initialized sequence classification head __lowerCamelCase = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2 ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = prepare_layoutlm_batch_inputs() # forward pass __lowerCamelCase = model( input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar __lowerCamelCase = outputs.loss __lowerCamelCase = (2,) self.assertEqual(loss.shape , UpperCamelCase_ ) # test the shape of the logits __lowerCamelCase = outputs.logits __lowerCamelCase = (2, 2) self.assertEqual(logits.shape , UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self: Union[str, Any] ): # initialize model with randomly initialized token classification head __lowerCamelCase = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=13 ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, 
__lowerCamelCase = prepare_layoutlm_batch_inputs() # forward pass __lowerCamelCase = model( input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) # test the shape of the logits __lowerCamelCase = outputs.logits __lowerCamelCase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self: str ): # initialize model with randomly initialized question answering head __lowerCamelCase = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""" ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = prepare_layoutlm_batch_inputs() # forward pass __lowerCamelCase = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) # test the shape of the logits __lowerCamelCase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , UpperCamelCase_ ) self.assertEqual(outputs.end_logits.shape , UpperCamelCase_ )
80
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : Any = 'maskformer-swin' UpperCAmelCase__ : List[Any] = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = embed_dim __lowerCamelCase = depths __lowerCamelCase = len(UpperCamelCase_ ) __lowerCamelCase = num_heads __lowerCamelCase = window_size __lowerCamelCase = mlp_ratio __lowerCamelCase = qkv_bias __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = drop_path_rate __lowerCamelCase = hidden_act __lowerCamelCase = use_absolute_embeddings __lowerCamelCase = layer_norm_eps __lowerCamelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) ) __lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )] __lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
80
1
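For context on the bbox fixtures in the LayoutLM tests above: LayoutLM expects each bounding box scaled into a 0-1000 coordinate grid relative to the page size, which is why the fixture values top out at 1000. A minimal normalization sketch (the page dimensions here are hypothetical):

def normalize_bbox(bbox, width, height):
    # Scale pixel coordinates (x0, y0, x1, y1) into LayoutLM's 0-1000 grid.
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]

assert normalize_bbox((100, 50, 200, 100), width=1000, height=500) == [100, 100, 200, 200]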
from functools import lru_cache def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = 2 __lowerCamelCase = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(A__ ) if n > 1: factors.add(A__ ) return factors @lru_cache def lowerCamelCase__ ( A__ : int ): '''simple docstring''' return len(unique_prime_factors(A__ ) ) def lowerCamelCase__ ( A__ : list ): '''simple docstring''' return len(set(A__ ) ) in (0, 1) def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = 2 while True: # Increment each value of a generated range __lowerCamelCase = [base + i for i in range(A__ )] # Run elements through our unique_prime_factors function # Append our target number to the end. __lowerCamelCase = [upf_len(A__ ) for x in group] checker.append(A__ ) # If all numbers in the list are equal, return the group variable. if equality(A__ ): return group # Increment our base variable by 1 base += 1 def lowerCamelCase__ ( A__ : int = 4 ): '''simple docstring''' __lowerCamelCase = run(A__ ) return results[0] if len(A__ ) else None if __name__ == "__main__": print(solution())
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): __lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa] def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if length > 1: __lowerCamelCase = int(length / 2 ) for i in range(A__ , low + middle ): comp_and_swap(A__ , A__ , i + middle , A__ ) bitonic_merge(A__ , A__ , A__ , A__ ) bitonic_merge(A__ , low + middle , A__ , A__ ) def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if length > 1: __lowerCamelCase = int(length / 2 ) bitonic_sort(A__ , A__ , A__ , 1 ) bitonic_sort(A__ , low + middle , A__ , 0 ) bitonic_merge(A__ , A__ , A__ , A__ ) if __name__ == "__main__": UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
80
1
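A usage sketch for the bitonic sort sample above, assuming the original identifiers (bitonic_sort with signature array, low, length, direction) behind the flattened names; note that this network formulation requires the input length to be a power of two:

data = [12, 42, -21, 17, 23, 18, 9, -5]  # length 8 == 2**3
bitonic_sort(data, 0, len(data), 1)      # direction 1 sorts ascending
assert data == sorted(data)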
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class lowerCamelCase__( __lowerCamelCase): # to overwrite at feature extractor specific tests UpperCAmelCase__ : Any = None UpperCAmelCase__ : List[Any] = None @property def lowerCAmelCase__ ( self: Union[str, Any] ): return self.feat_extract_tester.prepare_feat_extract_dict() def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(UpperCamelCase_ , """feature_size""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """sampling_rate""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """padding_value""" ) ) def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common() __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) for x, y in zip(UpperCamelCase_ , processed_features[input_name] ) ) ) __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase_ ) __lowerCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" ) __lowerCamelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowerCamelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase_ ) __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" ) __lowerCamelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowerCamelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def lowerCAmelCase__ ( self: int ): __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase_ ) __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" ) __lowerCamelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowerCamelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Dict=False ): def _inputs_have_equal_length(UpperCamelCase_: Tuple ): __lowerCamelCase = len(input[0] ) for input_slice in input[1:]: if len(UpperCamelCase_ ) != length: return False return True def _inputs_are_equal(UpperCamelCase_: str , UpperCamelCase_: Optional[int] ): if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): return False for input_slice_a, input_slice_a in zip(UpperCamelCase_ , UpperCamelCase_ ): if not
np.allclose(np.asarray(UpperCamelCase_ ) , np.asarray(UpperCamelCase_ ) , atol=1E-3 ): return False return True __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase_ ) __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: speech_inputs} ) __lowerCamelCase = self.feat_extract_tester.seq_length_diff __lowerCamelCase = self.feat_extract_tester.max_seq_length + pad_diff __lowerCamelCase = self.feat_extract_tester.min_seq_length __lowerCamelCase = self.feat_extract_tester.batch_size __lowerCamelCase = self.feat_extract_tester.feature_size # test padding for List[int] + numpy __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding=UpperCamelCase_ ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""longest""" ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[-1] ) ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""longest""" , return_tensors="""np""" ) __lowerCamelCase = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(UpperCamelCase_ ): feat_extract.pad(UpperCamelCase_ , padding="""max_length""" )[input_name] __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , return_tensors="""np""" ) __lowerCamelCase = input_a[input_name] self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertTrue(_inputs_are_equal(UpperCamelCase_ , UpperCamelCase_ ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , pad_to_multiple_of=10 ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""longest""" , pad_to_multiple_of=10 ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , pad_to_multiple_of=10 , max_length=UpperCamelCase_ ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , pad_to_multiple_of=10 , max_length=UpperCamelCase_ , return_tensors="""np""" , ) __lowerCamelCase = input_a[input_name] self.assertTrue(all(len(UpperCamelCase_ ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(UpperCamelCase_ , UpperCamelCase_ ) ) __lowerCamelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(UpperCamelCase_ ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct __lowerCamelCase = (np.ones(self.feat_extract_tester.feature_size ) * 
feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Any=False ): def _inputs_have_equal_length(UpperCamelCase_: List[str] ): __lowerCamelCase = len(input[0] ) for input_slice in input[1:]: if len(UpperCamelCase_ ) != length: return False return True def _inputs_are_equal(UpperCamelCase_: List[Any] , UpperCamelCase_: int ): if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): return False for input_slice_a, input_slice_a in zip(UpperCamelCase_ , UpperCamelCase_ ): if not np.allclose(np.asarray(UpperCamelCase_ ) , np.asarray(UpperCamelCase_ ) , atol=1E-3 ): return False return True __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase_ ) __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: speech_inputs} ) # truncate to smallest __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=UpperCamelCase_ ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) ) __lowerCamelCase = input_a[input_name] self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) ) # truncate to smallest with np __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=UpperCamelCase_ , ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" ) __lowerCamelCase = input_a[input_name] self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) ) # truncate to middle __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=UpperCamelCase_ , return_tensors="""np""" , ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=UpperCamelCase_ ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" ) __lowerCamelCase = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) 
self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertTrue(_inputs_are_equal(UpperCamelCase_ , UpperCamelCase_ ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCamelCase_ ): feat_extract.pad(UpperCamelCase_ , truncation=UpperCamelCase_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCamelCase_ ): feat_extract.pad(UpperCamelCase_ , padding="""longest""" , truncation=UpperCamelCase_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCamelCase_ ): feat_extract.pad(UpperCamelCase_ , padding="""longest""" , truncation=UpperCamelCase_ )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(UpperCamelCase_ ): feat_extract.pad(UpperCamelCase_ , padding="""max_length""" , truncation=UpperCamelCase_ )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy __lowerCamelCase = 12 __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCamelCase_ , truncation=UpperCamelCase_ , ) __lowerCamelCase = input_a[input_name] __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCamelCase_ , ) __lowerCamelCase = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of __lowerCamelCase = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: __lowerCamelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) ) self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) ) def lowerCAmelCase__ ( self: Union[str, Any] ): self._check_padding(numpify=UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): self._check_padding(numpify=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): self._check_truncation(numpify=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] ): self._check_truncation(numpify=UpperCamelCase_ ) @require_torch def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common() __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: speech_inputs} ) __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""longest""" , return_tensors="""np""" )[input_name] __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""longest""" , return_tensors="""pt""" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common() __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: 
speech_inputs} ) __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""longest""" , return_tensors="""np""" )[input_name] __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""longest""" , return_tensors="""tf""" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.feat_extract_dict __lowerCamelCase = True __lowerCamelCase = self.feature_extraction_class(**UpperCamelCase_ ) __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common() __lowerCamelCase = [len(UpperCamelCase_ ) for x in speech_inputs] __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: speech_inputs} ) __lowerCamelCase = feat_extract.pad(UpperCamelCase_ , padding="""longest""" , return_tensors="""np""" ) self.assertIn("""attention_mask""" , UpperCamelCase_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.feat_extract_dict __lowerCamelCase = True __lowerCamelCase = self.feature_extraction_class(**UpperCamelCase_ ) __lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_common() __lowerCamelCase = [len(UpperCamelCase_ ) for x in speech_inputs] __lowerCamelCase = feat_extract.model_input_names[0] __lowerCamelCase = BatchFeature({input_name: speech_inputs} ) __lowerCamelCase = min(UpperCamelCase_ ) __lowerCamelCase = feat_extract.pad( UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""np""" ) self.assertIn("""attention_mask""" , UpperCamelCase_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
80
from ...configuration_utils import PretrainedConfig UpperCAmelCase_ = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP UpperCAmelCase__ : Dict = 'nezha' def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = max_relative_position __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = classifier_dropout __lowerCamelCase = use_cache
80
1
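The pad_to_multiple_of assertions in the feature-extraction tests above use the standard ceiling rule for the padded length; sketched in isolation:

def padded_length(length: int, multiple: int) -> int:
    # Round up to the nearest multiple; a no-op when already aligned.
    if length % multiple == 0:
        return length
    return ((length // multiple) + 1) * multiple

assert padded_length(156, 10) == 160
assert padded_length(160, 10) == 160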
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : Dict = 1 @register_to_config def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ): # set `betas`, `alphas`, `timesteps` self.set_timesteps(UpperCamelCase_ ) # standard deviation of the initial noise distribution __lowerCamelCase = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. __lowerCamelCase = 4 # running values __lowerCamelCase = [] def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ): __lowerCamelCase = num_inference_steps __lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] __lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: __lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: __lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2 __lowerCamelCase = (1.0 - self.betas**2) ** 0.5 __lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] __lowerCamelCase = timesteps.to(UpperCamelCase_ ) __lowerCamelCase = [] def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ): if self.num_inference_steps is None: raise ValueError( """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" ) __lowerCamelCase = (self.timesteps == timestep).nonzero().item() __lowerCamelCase = timestep_index + 1 __lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(UpperCamelCase_ ) if len(self.ets ) == 1: __lowerCamelCase = self.ets[-1] elif len(self.ets ) == 2: __lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: __lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: __lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) __lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ): return sample def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ): __lowerCamelCase = self.alphas[timestep_index] __lowerCamelCase = self.betas[timestep_index] __lowerCamelCase = self.alphas[prev_timestep_index] __lowerCamelCase = self.betas[prev_timestep_index] __lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 ) __lowerCamelCase = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[Any] ): return self.config.num_train_timesteps
80
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__: def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ): if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ): if self.new_user_input: if overwrite: logger.warning( F'User input added while unprocessed input already existed: "{self.new_user_input}" was overwritten ' F'with: "{text}".' ) __lowerCamelCase = text else: logger.warning( F'User input added while unprocessed input already existed: "{self.new_user_input}" new input ' F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: __lowerCamelCase = text def lowerCAmelCase__ ( self: List[str] ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): self.generated_responses.append(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self: Union[str, Any] ): __lowerCamelCase = F'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): __lowerCamelCase = """user""" if is_user else """bot""" output += F'{name} >> {text} \n' return output @add_end_docstrings( __lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ): __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(UpperCamelCase_ ) return preprocess_params, forward_params, postprocess_params def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ): __lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1: return outputs[0] return outputs def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError("""ConversationalPipeline expects a Conversation as input""" ) if conversation.new_user_input is None: raise ValueError( F'Conversation with UUID {conversation.uuid} does not contain new user input to process. ' """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ): __lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length ) __lowerCamelCase = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:] __lowerCamelCase = model_inputs.pop("""conversation""" ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ): __lowerCamelCase = model_outputs["""output_ids"""] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , ) __lowerCamelCase = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(UpperCamelCase_ ) return conversation def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ): __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) if len(UpperCamelCase_ ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
80
1
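The multistep blends in the scheduler above ((3,-1)/2, (23,-16,5)/12 and (55,-59,37,-9)/24) are the classic Adams-Bashforth weights of orders 2 through 4; each set must sum to 1 for consistency, which a few lines verify (illustrative only):

from fractions import Fraction

adams_bashforth_weights = [
    [Fraction(3, 2), Fraction(-1, 2)],
    [Fraction(23, 12), Fraction(-16, 12), Fraction(5, 12)],
    [Fraction(55, 24), Fraction(-59, 24), Fraction(37, 24), Fraction(-9, 24)],
]
# Consistency condition: the weights of each order sum to exactly 1.
assert all(sum(weights) == 1 for weights in adams_bashforth_weights)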
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' __lowerCamelCase = fname.split(os.path.sep )[-1] return re.search(R"""^(.*)_\d+\.jpg$""" , A__ ).groups()[0] class lowerCamelCase__( __lowerCamelCase): def __init__( self: int , UpperCamelCase_: str , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=None ): __lowerCamelCase = file_names __lowerCamelCase = image_transform __lowerCamelCase = label_to_id def __len__( self: Tuple ): return len(self.file_names ) def __getitem__( self: Tuple , UpperCamelCase_: Dict ): __lowerCamelCase = self.file_names[idx] __lowerCamelCase = PIL.Image.open(UpperCamelCase_ ) __lowerCamelCase = raw_image.convert("""RGB""" ) if self.image_transform is not None: __lowerCamelCase = self.image_transform(UpperCamelCase_ ) __lowerCamelCase = extract_label(UpperCamelCase_ ) if self.label_to_id is not None: __lowerCamelCase = self.label_to_id[label] return {"image": image, "label": label} def lowerCamelCase__ ( A__ : List[Any] , A__ : List[Any] ): '''simple docstring''' if args.with_tracking: __lowerCamelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: __lowerCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config["""lr"""] __lowerCamelCase = int(config["""num_epochs"""] ) __lowerCamelCase = int(config["""seed"""] ) __lowerCamelCase = int(config["""batch_size"""] ) __lowerCamelCase = config["""image_size"""] if not isinstance(A__ , (list, tuple) ): __lowerCamelCase = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , """isdigit""" ): if args.checkpointing_steps == "epoch": __lowerCamelCase = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): __lowerCamelCase = int(args.checkpointing_steps ) else: raise ValueError( f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' ) else: __lowerCamelCase = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: __lowerCamelCase = os.path.split(A__ )[-1].split(""".""" )[0] accelerator.init_trackers(A__ , A__ ) # Grab all the image filenames __lowerCamelCase = [os.path.join(args.data_dir , A__ ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )] # Build the label correspondences __lowerCamelCase = [extract_label(A__ ) for fname in file_names] __lowerCamelCase = list(set(A__ ) ) id_to_label.sort() __lowerCamelCase = {lbl: i for i, lbl in enumerate(A__ )} # Set the seed before splitting the data. 
np.random.seed(A__ ) torch.manual_seed(A__ ) torch.cuda.manual_seed_all(A__ ) # Split our filenames between train and validation __lowerCamelCase = np.random.permutation(len(A__ ) ) __lowerCamelCase = int(0.8 * len(A__ ) ) __lowerCamelCase = random_perm[:cut] __lowerCamelCase = random_perm[cut:] # For training we use a simple RandomResizedCrop __lowerCamelCase = Compose([RandomResizedCrop(A__ , scale=(0.5, 1.0) ), ToTensor()] ) __lowerCamelCase = PetsDataset( [file_names[i] for i in train_split] , image_transform=A__ , label_to_id=A__ ) # For evaluation, we use a deterministic Resize __lowerCamelCase = Compose([Resize(A__ ), ToTensor()] ) __lowerCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=A__ , label_to_id=A__ ) # Instantiate dataloaders. __lowerCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) __lowerCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = create_model("""resnet50d""" , pretrained=A__ , num_classes=len(A__ ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): __lowerCamelCase = False for param in model.get_classifier().parameters(): __lowerCamelCase = True # We normalize the batches of images to be a bit faster. __lowerCamelCase = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device ) __lowerCamelCase = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler __lowerCamelCase = OneCycleLR(optimizer=A__ , max_lr=A__ , epochs=A__ , steps_per_epoch=len(A__ ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( A__ , A__ , A__ , A__ , A__ ) # We need to keep track of how many total steps we have iterated over __lowerCamelCase = 0 # We also need to keep track of the starting epoch so files are named properly __lowerCamelCase = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' ) accelerator.load_state(args.resume_from_checkpoint ) __lowerCamelCase = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint __lowerCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) __lowerCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` __lowerCamelCase = os.path.splitext(A__ )[0] if "epoch" in training_difference: __lowerCamelCase = int(training_difference.replace("""epoch_""" , """""" ) ) + 1 __lowerCamelCase = None else: __lowerCamelCase = int(training_difference.replace("""step_""" , """""" ) ) __lowerCamelCase = resume_step // len(A__ ) resume_step -= starting_epoch * len(A__ ) # Now we train the model for epoch in range(A__ , A__ ): model.train() if args.with_tracking: __lowerCamelCase = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step __lowerCamelCase = accelerator.skip_first_batches(A__ , A__ ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader __lowerCamelCase = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. __lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} __lowerCamelCase = (batch["""image"""] - mean) / std __lowerCamelCase = model(A__ ) __lowerCamelCase = torch.nn.functional.cross_entropy(A__ , batch["""label"""] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(A__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(A__ , A__ ): __lowerCamelCase = f'step_{overall_step}' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: __lowerCamelCase = os.path.join(args.output_dir , A__ ) accelerator.save_state(A__ ) model.eval() __lowerCamelCase = 0 __lowerCamelCase = 0 for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. __lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()} __lowerCamelCase = (batch["""image"""] - mean) / std with torch.no_grad(): __lowerCamelCase = model(A__ ) __lowerCamelCase = outputs.argmax(dim=-1 ) __lowerCamelCase, __lowerCamelCase = accelerator.gather_for_metrics((predictions, batch["""label"""]) ) __lowerCamelCase = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() __lowerCamelCase = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(f'epoch {epoch}: {100 * eval_metric:.2f}' ) if args.with_tracking: accelerator.log( { """accuracy""": 100 * eval_metric, """train_loss""": total_loss.item() / len(A__ ), """epoch""": epoch, } , step=A__ , ) if checkpointing_steps == "epoch": __lowerCamelCase = f'epoch_{epoch}' if args.output_dir is not None: __lowerCamelCase = os.path.join(args.output_dir , A__ ) accelerator.save_state(A__ ) if args.with_tracking: accelerator.end_training() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument("""--data_dir""" , required=A__ , help="""The data folder on disk.""" ) parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" ) parser.add_argument( """--mixed_precision""" , type=A__ , default=A__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--checkpointing_steps""" , type=A__ , default=A__ , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , ) parser.add_argument( """--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=A__ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {"""lr""": 3E-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224} training_function(A__ , A__ ) if __name__ == "__main__": main()
80
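The entry above follows the standard accelerate training pattern: construct the model, optimizer, and dataloaders, hand them all to Accelerator.prepare, and replace loss.backward() with accelerator.backward(loss). A minimal runnable sketch of that skeleton, with toy data standing in for the pets dataset (the sizes and model here are placeholders, not the script's values):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()

# Toy data and model; placeholders for the resnet50d pets classifier above.
dataset = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
dataloader = DataLoader(dataset, batch_size=8, shuffle=True)
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# prepare() moves everything to the right device and wraps the dataloader.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

model.train()
for inputs, labels in dataloader:
    outputs = model(inputs)
    loss = torch.nn.functional.cross_entropy(outputs, labels)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()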
import math def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = 2 __lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment __lowerCamelCase = [True] * (end + 1) __lowerCamelCase = [] while start <= end: if temp[start] is True: in_prime.append(A__ ) for i in range(start * start , end + 1 , A__ ): __lowerCamelCase = False start += 1 prime += in_prime __lowerCamelCase = end + 1 __lowerCamelCase = min(2 * end , A__ ) while low <= n: __lowerCamelCase = [True] * (high - low + 1) for each in in_prime: __lowerCamelCase = math.floor(low / each ) * each if t < low: t += each for j in range(A__ , high + 1 , A__ ): __lowerCamelCase = False for j in range(len(A__ ) ): if temp[j] is True: prime.append(j + low ) __lowerCamelCase = high + 1 __lowerCamelCase = min(high + end , A__ ) return prime print(sieve(10**6))
80
1
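For reference, a de-obfuscated sketch of the segmented sieve the entry above implements: sieve the primes up to sqrt(n) first, then mark composites segment by segment. This is an illustrative reconstruction, not the dataset's canonical text:

import math

def segmented_sieve(n: int) -> list:
    """Return all primes <= n using a segmented sieve."""
    limit = math.isqrt(n)
    # Base sieve up to sqrt(n).
    is_prime = [True] * (limit + 1)
    base_primes = []
    for p in range(2, limit + 1):
        if is_prime[p]:
            base_primes.append(p)
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    primes = list(base_primes)
    # Sieve each segment [low, high] of width about sqrt(n).
    low, high = limit + 1, min(2 * limit, n)
    while low <= n:
        segment = [True] * (high - low + 1)
        for p in base_primes:
            start = max(p * p, (low + p - 1) // p * p)  # first multiple of p in the segment
            for multiple in range(start, high + 1, p):
                segment[multiple - low] = False
        primes.extend(low + i for i, flag in enumerate(segment) if flag)
        low, high = high + 1, min(high + limit, n)
    return primes

print(len(segmented_sieve(10**6)))  # 78498 primes below one million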
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
80
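The entry above uses transformers' _LazyModule plus try/except availability checks so that torch- and TF-backed classes are only importable when their backend is installed. The same lazy-attribute idea can be sketched with a PEP 562 module-level __getattr__; "heavy_module" and "HeavyClass" below are placeholder names, not real imports:

# Minimal sketch of module-level lazy loading (PEP 562), the idea _LazyModule builds on.
import importlib

_lazy_attrs = {"HeavyClass": "heavy_module"}  # attribute name -> defining module

def __getattr__(name: str):
    # Import the backing module only on first attribute access.
    if name in _lazy_attrs:
        module = importlib.import_module(_lazy_attrs[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")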
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : int = BartphoTokenizer UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[str] = True def lowerCAmelCase__ ( self: Tuple ): super().setUp() __lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] __lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowerCamelCase = {"""unk_token""": """<unk>"""} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ) with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(F'{token} {vocab_tokens[token]}\n' ) __lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): __lowerCamelCase = """This is a là test""" __lowerCamelCase = """This is a<unk><unk> test""" return input_text, output_text def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map ) __lowerCamelCase = """This is a là test""" __lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split() __lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
80
1
from __future__ import annotations class lowerCamelCase__: def __init__( self: Optional[int] , UpperCamelCase_: int ): __lowerCamelCase = data __lowerCamelCase = None __lowerCamelCase = None def lowerCamelCase__ ( A__ : Node | None ): # In Order traversal of the tree '''simple docstring''' if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowerCamelCase__ ( A__ : Node | None ): '''simple docstring''' return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowerCamelCase__ ( A__ : Node ): '''simple docstring''' if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowerCamelCase__ ( ): # Main function for testing. '''simple docstring''' __lowerCamelCase = Node(1 ) __lowerCamelCase = Node(2 ) __lowerCamelCase = Node(3 ) __lowerCamelCase = Node(4 ) __lowerCamelCase = Node(5 ) __lowerCamelCase = Node(6 ) __lowerCamelCase = Node(7 ) __lowerCamelCase = Node(8 ) __lowerCamelCase = Node(9 ) print(is_full_binary_tree(A__ ) ) print(depth_of_tree(A__ ) ) print("""Tree is: """ ) display(A__ ) if __name__ == "__main__": main()
80
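A cleaned-up sketch of the two recursive checks in the entry above (tree depth, and whether every node has zero or two children); the class and function names are illustrative:

from __future__ import annotations

class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None

def depth(tree: Node | None) -> int:
    return 1 + max(depth(tree.left), depth(tree.right)) if tree else 0

def is_full(tree: Node | None) -> bool:
    # A full binary tree: every node has either 0 or 2 children.
    if tree is None:
        return True
    if (tree.left is None) != (tree.right is None):
        return False
    return is_full(tree.left) and is_full(tree.right)

root = Node(1)
root.left, root.right = Node(2), Node(3)
print(depth(root), is_full(root))  # 2 True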
def lowerCamelCase__ ( A__ : dict ): '''simple docstring''' __lowerCamelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowerCamelCase = set() return any( node not in visited and depth_first_search(A__ , A__ , A__ , A__ ) for node in graph ) def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ): '''simple docstring''' visited.add(A__ ) rec_stk.add(A__ ) for node in graph[vertex]: if node not in visited: if depth_first_search(A__ , A__ , A__ , A__ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(A__ ) return False if __name__ == "__main__": from doctest import testmod testmod()
80
1
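The entry above detects a cycle in a directed graph by tracking which vertices sit on the current DFS recursion stack; a back edge to a vertex still on the stack means a cycle. A readable sketch of the same idea:

def has_cycle(graph: dict) -> bool:
    """Detect a cycle in a directed graph given as an adjacency dict."""
    visited, on_stack = set(), set()

    def dfs(node) -> bool:
        visited.add(node)
        on_stack.add(node)
        for neighbor in graph[node]:
            if neighbor not in visited:
                if dfs(neighbor):
                    return True
            elif neighbor in on_stack:  # back edge => cycle
                return True
        on_stack.remove(node)
        return False

    return any(node not in visited and dfs(node) for node in graph)

print(has_cycle({"a": ["b"], "b": ["c"], "c": ["a"]}))  # True
print(has_cycle({"a": ["b"], "b": ["c"], "c": []}))     # False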
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class lowerCamelCase__( __lowerCamelCase): def __init__( self: Optional[int] , UpperCamelCase_: pyspark.sql.DataFrame , UpperCamelCase_: Optional[NamedSplit] = None , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: bool = True , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , UpperCamelCase_: str = "arrow" , **UpperCamelCase_: str , ): super().__init__( split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , **UpperCamelCase_ , ) __lowerCamelCase = load_from_cache_file __lowerCamelCase = file_format __lowerCamelCase = Spark( df=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , working_dir=UpperCamelCase_ , **UpperCamelCase_ , ) def lowerCAmelCase__ ( self: Tuple ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) __lowerCamelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCamelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ): '''simple docstring''' __lowerCamelCase = sorted(numsa + numsa ) __lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()] print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
80
1
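A de-obfuscated sketch of the median routine above: merge, sort, and take the middle element (or the mean of the two middle elements for even length):

def median_of_sorted_union(a: list, b: list) -> float:
    """Median of the multiset union of two number lists."""
    merged = sorted(a + b)
    mid, odd = divmod(len(merged), 2)
    return merged[mid] if odd else (merged[mid] + merged[mid - 1]) / 2

print(median_of_sorted_union([1, 3], [2]))     # 2
print(median_of_sorted_union([1, 2], [3, 4]))  # 2.5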
def lowerCamelCase__ ( A__ : int , A__ : list[int] , A__ : int ): '''simple docstring''' def count_of_possible_combinations(A__ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(A__ ) def lowerCamelCase__ ( A__ : int , A__ : list[int] , A__ : int ): '''simple docstring''' def count_of_possible_combinations_with_dp_array( A__ : int , A__ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] __lowerCamelCase = sum( count_of_possible_combinations_with_dp_array(target - item , A__ ) for item in array ) __lowerCamelCase = answer return answer __lowerCamelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(A__ , A__ ) def lowerCamelCase__ ( A__ : int , A__ : list[int] , A__ : int ): '''simple docstring''' __lowerCamelCase = [0] * (target + 1) __lowerCamelCase = 1 for i in range(1 , target + 1 ): for j in range(A__ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = 3 UpperCAmelCase_ = 5 UpperCAmelCase_ = [1, 2, 5] print(combination_sum_iv(n, array, target))
80
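Of the three variants in the entry above, the bottom-up table is the most compact: dp[t] counts ordered ways to reach total t, seeded with dp[0] = 1. A minimal sketch:

def combination_sum_iv(array: list, target: int) -> int:
    """Count ordered combinations of array elements summing to target."""
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: pick nothing
    for total in range(1, target + 1):
        for item in array:
            if total - item >= 0:
                dp[total] += dp[total - item]
    return dp[target]

print(combination_sum_iv([1, 2, 5], 5))  # 9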
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __lowerCamelCase = get_activation("""gelu""" ) self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) ) self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) __lowerCamelCase = get_activation("""gelu""" ) __lowerCamelCase = get_activation("""gelu_10""" ) __lowerCamelCase = torch_builtin(UpperCamelCase_ ) __lowerCamelCase = geluaa(UpperCamelCase_ ) __lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCAmelCase__ ( self: str ): get_activation("""gelu""" ) get_activation("""gelu_10""" ) get_activation("""gelu_fast""" ) get_activation("""gelu_new""" ) get_activation("""gelu_python""" ) get_activation("""gelu_pytorch_tanh""" ) get_activation("""linear""" ) get_activation("""mish""" ) get_activation("""quick_gelu""" ) get_activation("""relu""" ) get_activation("""sigmoid""" ) get_activation("""silu""" ) get_activation("""swish""" ) get_activation("""tanh""" ) with self.assertRaises(UpperCamelCase_ ): get_activation("""bogus""" ) with self.assertRaises(UpperCamelCase_ ): get_activation(UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = get_activation("""gelu""" ) __lowerCamelCase = 1 __lowerCamelCase = get_activation("""gelu""" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = acta.a
80
1
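The activation tests above compare torch's erf-based GELU against Python-level variants. As a reference point, the widely used tanh approximation can be checked against the exact form like this (a sketch, not the test suite's code):

import math
import torch

def gelu_tanh(x: torch.Tensor) -> torch.Tensor:
    # Tanh-based GELU approximation (Hendrycks & Gimpel, 2016).
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

x = torch.linspace(-3.0, 3.0, steps=101)
exact = torch.nn.functional.gelu(x)  # erf-based GELU
print(torch.max(torch.abs(gelu_tanh(x) - exact)).item())  # small, roughly 1e-3 or below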
from __future__ import annotations def lowerCamelCase__ ( A__ : list ): '''simple docstring''' if not nums: raise ValueError("""List is empty""" ) return sum(A__ ) / len(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
80
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowerCamelCase__( __lowerCamelCase): @slow @require_torch def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) __lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" ) __lowerCamelCase = bertabert.config.encoder.vocab_size __lowerCamelCase = tokenizer.sep_token_id __lowerCamelCase = tokenizer.cls_token_id __lowerCamelCase = 1_28 __lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) __lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) __lowerCamelCase = train_dataset.select(range(32 ) ) __lowerCamelCase = val_dataset.select(range(16 ) ) __lowerCamelCase = 4 def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ): # Tokenizer will automatically set [BOS] <text> [EOS] __lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 ) __lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 ) __lowerCamelCase = inputs.input_ids __lowerCamelCase = inputs.attention_mask __lowerCamelCase = outputs.input_ids __lowerCamelCase = outputs.input_ids.copy() __lowerCamelCase = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] __lowerCamelCase = outputs.attention_mask assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids ) assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCamelCase_: int ): __lowerCamelCase = pred.label_ids __lowerCamelCase = pred.predictions # all unnecessary tokens are removed __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ ) return {"accuracy": accuracy} # map train dataset __lowerCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset __lowerCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) __lowerCamelCase = self.get_auto_remove_tmp_dir() __lowerCamelCase = SeqaSeqTrainingArguments( output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , 
logging_steps=2 , ) # instantiate trainer __lowerCamelCase = SeqaSeqTrainer( model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , ) # start training trainer.train()
80
1
def lowerCamelCase__ ( A__ : str , A__ : str ): '''simple docstring''' __lowerCamelCase = len(A__ ) + 1 __lowerCamelCase = len(A__ ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. __lowerCamelCase = [[0 for i in range(A__ )] for j in range(A__ )] # since string of zero length match pattern of zero length __lowerCamelCase = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , A__ ): __lowerCamelCase = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , A__ ): __lowerCamelCase = dp[0][j - 2] if pattern[j - 1] == """*""" else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , A__ ): for j in range(1 , A__ ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": __lowerCamelCase = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: __lowerCamelCase = 1 elif pattern[j - 2] in (input_string[i - 1], "."): __lowerCamelCase = dp[i - 1][j] else: __lowerCamelCase = 0 else: __lowerCamelCase = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") UpperCAmelCase_ = 'aab' UpperCAmelCase_ = 'c*a*b' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f"""{input_string} matches the given pattern {pattern}""") else: print(f"""{input_string} does not match with the given pattern {pattern}""")
80
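A cleaned-up sketch of the '.'/'*' matcher above. dp[i][j] records whether the first i characters of the string match the first j characters of the pattern; '*' either drops its preceding atom (dp[i][j-2]) or consumes one more matching character (dp[i-1][j]):

def match_pattern(s: str, p: str) -> bool:
    """Regex matching with '.' and '*' via bottom-up DP."""
    m, n = len(s) + 1, len(p) + 1
    dp = [[False] * n for _ in range(m)]
    dp[0][0] = True
    for j in range(2, n):  # patterns like a*, a*b* can match the empty string
        dp[0][j] = dp[0][j - 2] and p[j - 1] == "*"
    for i in range(1, m):
        for j in range(1, n):
            if p[j - 1] in (s[i - 1], "."):
                dp[i][j] = dp[i - 1][j - 1]
            elif p[j - 1] == "*":
                dp[i][j] = dp[i][j - 2] or (
                    p[j - 2] in (s[i - 1], ".") and dp[i - 1][j]
                )
    return dp[-1][-1]

print(match_pattern("aab", "c*a*b"))  # True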
class lowerCamelCase__: # Public class to implement a graph def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): __lowerCamelCase = row __lowerCamelCase = col __lowerCamelCase = graph def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ): # Checking all 8 elements surrounding nth element __lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1] __lowerCamelCase = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands. __lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )] __lowerCamelCase = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += 1 return count
80
1
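The entry above counts 8-connected components of 1s with a DFS that flood-fills each unvisited land cell. A compact sketch of the same algorithm:

def count_islands(grid: list) -> int:
    """Count 8-connected groups of 1s via DFS flood fill."""
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]

    def dfs(r: int, c: int) -> None:
        seen[r][c] = True
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] and not seen[nr][nc]:
                    dfs(nr, nc)

    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] and not seen[r][c]:
                dfs(r, c)
                count += 1
    return count

print(count_islands([[1, 1, 0], [0, 0, 0], [0, 0, 1]]))  # 2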
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCamelCase__: def __init__( self: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=13 , UpperCamelCase_: List[Any]=64 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Tuple=True , UpperCamelCase_: int=True , UpperCamelCase_: List[str]=32 , UpperCamelCase_: Dict=5 , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: str="gelu" , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Tuple=10 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[str]=[1, 16, 4, 4] , UpperCamelCase_: Optional[int]=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = scope __lowerCamelCase = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size __lowerCamelCase = (self.image_size // 32) ** 2 __lowerCamelCase = num_patches + 1 def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [4, 8, 16, 32], """num_groups""": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , 
initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: str ): __lowerCamelCase = ViTHybridModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Tuple ): __lowerCamelCase = self.type_sequence_label_size __lowerCamelCase = ViTHybridForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase__ ( self: int ): __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs __lowerCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () UpperCAmelCase__ : List[str] = ( {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification} if is_torch_available() else {} ) UpperCAmelCase__ : Union[str, Any] = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Tuple = False def lowerCAmelCase__ ( self: str ): __lowerCamelCase = ViTHybridModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def lowerCAmelCase__ ( self: Optional[int] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCAmelCase__ ( self: List[str] ): pass def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(UpperCamelCase_ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: int ): __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase = _config_zero_init(UpperCamelCase_ ) for model_class in 
self.all_model_classes: __lowerCamelCase = model_class(config=UpperCamelCase_ ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": __lowerCamelCase = [F'{name}.{key}' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def lowerCAmelCase__ ( self: Tuple ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = ViTHybridModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__( unittest.TestCase): @cached_property def lowerCAmelCase__ ( self: str ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( UpperCamelCase_ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): __lowerCamelCase = model(**UpperCamelCase_ ) # verify the logits __lowerCamelCase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) __lowerCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) ) @slow @require_accelerate def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" ) __lowerCamelCase = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" ) __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ) __lowerCamelCase = model(**UpperCamelCase_ ) __lowerCamelCase = outputs.logits # model predicts one of the 1000 ImageNet classes __lowerCamelCase = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
80
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase__ ( A__ : str ): '''simple docstring''' __lowerCamelCase = DPTConfig() if "large" in checkpoint_url: __lowerCamelCase = 1024 __lowerCamelCase = 4096 __lowerCamelCase = 24 __lowerCamelCase = 16 __lowerCamelCase = [5, 11, 17, 23] __lowerCamelCase = [256, 512, 1024, 1024] __lowerCamelCase = (1, 384, 384) if "ade" in checkpoint_url: __lowerCamelCase = True __lowerCamelCase = 150 __lowerCamelCase = """huggingface/label-files""" __lowerCamelCase = """ade20k-id2label.json""" __lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) ) __lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = [1, 150, 480, 480] return config, expected_shape def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' __lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : Dict ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): __lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: __lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: __lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" ) if "pos_embed" in name: __lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: __lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: __lowerCamelCase = name.replace("""proj""" , """projection""" ) if "blocks" in name: __lowerCamelCase = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: __lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name: __lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: __lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: __lowerCamelCase = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: __lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: __lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: __lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: __lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: __lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 __lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: __lowerCamelCase = name.replace("""out_conv""" , 
"""projection""" ) if "resConfUnit1" in name: __lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: __lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: __lowerCamelCase = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: __lowerCamelCase = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: __lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: __lowerCamelCase = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: __lowerCamelCase = name.replace("""bn""" , """batch_norm""" ) if "head" in name: __lowerCamelCase = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: __lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: __lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" ) return name def lowerCamelCase__ ( A__ : Tuple , A__ : Any ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) __lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase = in_proj_weight[: config.hidden_size, :] __lowerCamelCase = in_proj_bias[: config.hidden_size] __lowerCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase = in_proj_weight[ -config.hidden_size :, : ] __lowerCamelCase 
= in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ ) # load original state_dict from URL __lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(A__ ) # rename keys for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(A__ ) __lowerCamelCase = val # read in qkv matrices read_in_q_k_v(A__ , A__ ) # load HuggingFace model __lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ ) model.load_state_dict(A__ ) model.eval() # Check outputs on an image __lowerCamelCase = 480 if """ade""" in checkpoint_url else 384 __lowerCamelCase = DPTImageProcessor(size=A__ ) __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(A__ , return_tensors="""pt""" ) # forward pass __lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth # Assert logits __lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] ) if "ade" in checkpoint_url: __lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] ) assert outputs.shape == torch.Size(A__ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , A__ ) ) Path(A__ ).mkdir(exist_ok=A__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(A__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(A__ ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , ) image_processor.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) UpperCAmelCase_ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
80
1
from math import isqrt def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , A__ , i ): __lowerCamelCase = False return [i for i in range(2 , A__ ) if is_prime[i]] def lowerCamelCase__ ( A__ : int = 10**8 ): '''simple docstring''' __lowerCamelCase = calculate_prime_numbers(max_number // 2 ) __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = len(A__ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f"""{solution() = }""")
80
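The counting loop in the entry above is a two-pointer sweep over the sorted primes: for each left prime, shrink right until the product drops below the bound, then every pair from left up to right counts. A sketch with an explicit guard against the right pointer crossing left (the original relies on the outer loop bound):

def count_semiprimes(primes: list, max_number: int) -> int:
    """Count products p * q < max_number with p <= q both prime."""
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        while primes[left] * primes[right] >= max_number and right > left:
            right -= 1
        if primes[left] * primes[right] >= max_number:
            break  # even the smallest remaining product is too large
        count += right - left + 1
        left += 1
    return count

primes_below_15 = [2, 3, 5, 7, 11, 13]  # enough for all semiprimes below 30
print(count_semiprimes(primes_below_15, 30))  # 10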
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
80
1
from __future__ import annotations from typing import Any class lowerCamelCase__: def __init__( self: str , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: float = 0 ): __lowerCamelCase, __lowerCamelCase = row, column __lowerCamelCase = [[default_value for c in range(UpperCamelCase_ )] for r in range(UpperCamelCase_ )] def __str__( self: Dict ): __lowerCamelCase = F'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier __lowerCamelCase = 0 for row_vector in self.array: for obj in row_vector: __lowerCamelCase = max(UpperCamelCase_ , len(str(UpperCamelCase_ ) ) ) __lowerCamelCase = F'%{max_element_length}s' # Make string and return def single_line(UpperCamelCase_: list[float] ) -> str: nonlocal string_format_identifier __lowerCamelCase = """[""" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(UpperCamelCase_ ) for row_vector in self.array ) return s def __repr__( self: int ): return str(self ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: tuple[int, int] ): if not (isinstance(UpperCamelCase_ , (list, tuple) ) and len(UpperCamelCase_ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self: List[str] , UpperCamelCase_: tuple[int, int] ): assert self.validate_indicies(UpperCamelCase_ ) return self.array[loc[0]][loc[1]] def __setitem__( self: Tuple , UpperCamelCase_: tuple[int, int] , UpperCamelCase_: float ): assert self.validate_indicies(UpperCamelCase_ ) __lowerCamelCase = value def __add__( self: Any , UpperCamelCase_: Matrix ): assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert self.row == another.row and self.column == another.column # Add __lowerCamelCase = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __lowerCamelCase = self[r, c] + another[r, c] return result def __neg__( self: Union[str, Any] ): __lowerCamelCase = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __lowerCamelCase = -self[r, c] return result def __sub__( self: Union[str, Any] , UpperCamelCase_: Matrix ): return self + (-another) def __mul__( self: List[str] , UpperCamelCase_: int | float | Matrix ): if isinstance(UpperCamelCase_ , (int, float) ): # Scalar multiplication __lowerCamelCase = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __lowerCamelCase = self[r, c] * another return result elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): # Matrix multiplication assert self.column == another.row __lowerCamelCase = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __lowerCamelCase = F'Unsupported type given for another ({type(UpperCamelCase_ )})' raise TypeError(UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __lowerCamelCase = self[r, c] return result def lowerCAmelCase__ ( self: str , UpperCamelCase_: Matrix , UpperCamelCase_: Matrix ): assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __lowerCamelCase = v.transpose() 
__lowerCamelCase = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = Matrix(3 , 3 , 0 ) for i in range(3 ): __lowerCamelCase = 1 print(f'a^(-1) is {ainv}' ) # u, v __lowerCamelCase = Matrix(3 , 1 , 0 ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1, 2, -3 __lowerCamelCase = Matrix(3 , 1 , 0 ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 4, -2, 5 print(f'u is {u}' ) print(f'v is {v}' ) print(f'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(A__ , A__ )}' ) def lowerCamelCase__ ( ): '''simple docstring''' import doctest doctest.testmod() testa()
80
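The sherman_morrison method above assumes the matrix already holds A^{-1} and applies the identity (A + u v^T)^{-1} = A^{-1} - (A^{-1} u)(v^T A^{-1}) / (1 + v^T A^{-1} u). A NumPy sketch checking it against a direct inverse:

import numpy as np

def sherman_morrison(a_inv: np.ndarray, u: np.ndarray, v: np.ndarray) -> np.ndarray:
    """(A + u v^T)^{-1} computed from A^{-1}, for column vectors u and v."""
    denom = 1.0 + (v.T @ a_inv @ u).item()
    if abs(denom) < 1e-12:
        raise ValueError("A + u v^T is singular")
    return a_inv - (a_inv @ u) @ (v.T @ a_inv) / denom

a_inv = np.eye(3)  # A = I, so A^{-1} = I
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
result = sherman_morrison(a_inv, u, v)
print(np.allclose(result, np.linalg.inv(np.eye(3) + u @ v.T)))  # True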
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = 'bert' def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True 
, UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ): super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout class lowerCamelCase__( __lowerCamelCase): @property def lowerCAmelCase__ ( self: Any ): if self.task == "multiple-choice": __lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __lowerCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
80
1
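The config class above is typically used to instantiate a freshly initialized model with custom dimensions; a small usage sketch (the sizes are arbitrary):

from transformers import BertConfig, BertModel

config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
model = BertModel(config)  # randomly initialized with the smaller geometry
print(model.config.hidden_size)  # 256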
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'caidas/swin2sr-classicalsr-x2-64': ( 'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json' ), } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = 'swin2sr' UpperCAmelCase__ : Optional[int] = { 'hidden_size': 'embed_dim', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self: Any , UpperCamelCase_: Any=64 , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Dict=3 , UpperCamelCase_: Any=1_80 , UpperCamelCase_: Optional[Any]=[6, 6, 6, 6, 6, 6] , UpperCamelCase_: Union[str, Any]=[6, 6, 6, 6, 6, 6] , UpperCamelCase_: Tuple=8 , UpperCamelCase_: int=2.0 , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=0.0 , UpperCamelCase_: Any=0.0 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Optional[Any]="gelu" , UpperCamelCase_: List[Any]=False , UpperCamelCase_: int=0.02 , UpperCamelCase_: Union[str, Any]=1E-5 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: List[Any]=1.0 , UpperCamelCase_: Any="1conv" , UpperCamelCase_: Dict="pixelshuffle" , **UpperCamelCase_: Dict , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = embed_dim __lowerCamelCase = depths __lowerCamelCase = len(UpperCamelCase_ ) __lowerCamelCase = num_heads __lowerCamelCase = window_size __lowerCamelCase = mlp_ratio __lowerCamelCase = qkv_bias __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = drop_path_rate __lowerCamelCase = hidden_act __lowerCamelCase = use_absolute_embeddings __lowerCamelCase = layer_norm_eps __lowerCamelCase = initializer_range __lowerCamelCase = upscale __lowerCamelCase = img_range __lowerCamelCase = resi_connection __lowerCamelCase = upsampler
80
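The attribute_map in the Swin2SR config above lets callers read hidden_size while the stored field is embed_dim. A toy illustration of that aliasing for reads (my own minimal class, not the HF implementation):

class AliasedConfig:
    attribute_map = {"hidden_size": "embed_dim", "num_hidden_layers": "num_layers"}

    def __init__(self, embed_dim=180, depths=(6, 6, 6, 6, 6, 6)):
        self.embed_dim = embed_dim
        self.depths = list(depths)
        self.num_layers = len(self.depths)  # mirrors num_layers = len(depths) above

    def __getattr__(self, name):
        # only reached when normal attribute lookup fails, i.e. for the alias names
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

cfg = AliasedConfig()
print(cfg.hidden_size, cfg.num_hidden_layers)  # 180 6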
from __future__ import annotations from math import ceil, floor, sqrt def lowerCamelCase__ ( A__ : int = 2000000 ): '''simple docstring''' __lowerCamelCase = [0] __lowerCamelCase = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target __lowerCamelCase = 0 # the area corresponding to the grid that gives the product closest to target __lowerCamelCase = 0 # an estimate of b, using the quadratic formula __lowerCamelCase = 42 # the largest integer less than b_estimate __lowerCamelCase = 42 # the smallest integer greater than b_estimate __lowerCamelCase = 42 # the triangle number corresponding to b_floor __lowerCamelCase = 42 # the triangle number corresponding to b_ceil __lowerCamelCase = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): __lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 __lowerCamelCase = floor(A__ ) __lowerCamelCase = ceil(A__ ) __lowerCamelCase = triangle_numbers[b_floor] __lowerCamelCase = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): __lowerCamelCase = triangle_b_first_guess * triangle_a __lowerCamelCase = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): __lowerCamelCase = triangle_b_second_guess * triangle_a __lowerCamelCase = idx_a * b_ceil return area if __name__ == "__main__": print(f"""{solution() = }""")
80
1
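The search above leans on the triangle-number identity T(n) = n * (n + 1) / 2 and inverts it with the quadratic formula b = (-1 + sqrt(1 + 8 * t)) / 2. A quick numeric check of that inversion:

from math import ceil, floor, sqrt

def triangle(n: int) -> int:
    return n * (n + 1) // 2

t = 21  # T(6)
b_estimate = (-1 + sqrt(1 + 8 * t)) / 2
print(b_estimate)  # 6.0, exact because 21 is triangular
print(triangle(floor(b_estimate)), triangle(ceil(b_estimate)))  # 21 21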
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right UpperCAmelCase_ = 250_004 UpperCAmelCase_ = 250_020 @require_sentencepiece @require_tokenizers class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = MBartTokenizer UpperCAmelCase__ : Union[str, Any] = MBartTokenizerFast UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : Tuple = True def lowerCAmelCase__ ( self: Dict ): super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) __lowerCamelCase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCAmelCase__ ( self: Union[str, Any] ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __lowerCamelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = tokenizer_r.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = 
tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) __lowerCamelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way __lowerCamelCase = tokenizer_r.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=True __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) __lowerCamelCase = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way __lowerCamelCase = tokenizer_r.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=False __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) __lowerCamelCase = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __lowerCamelCase = tokenizer_r.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase__( unittest.TestCase): UpperCAmelCase__ : Union[str, Any] = 'facebook/mbart-large-en-ro' UpperCAmelCase__ : Optional[Any] = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] UpperCAmelCase__ : Tuple = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] UpperCAmelCase__ : int = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE] @classmethod def lowerCAmelCase__ ( cls: List[Any] ): __lowerCamelCase = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , 
tgt_lang="""ro_RO""" ) __lowerCamelCase = 1 return cls def lowerCAmelCase__ ( self: str ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict ): self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids ) __lowerCamelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] __lowerCamelCase = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) __lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , UpperCamelCase_ ) __lowerCamelCase = 10 __lowerCamelCase = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , UpperCamelCase_ ) self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[int] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = MBartTokenizer.from_pretrained(UpperCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ ) @require_torch def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="""pt""" ) __lowerCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) __lowerCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) __lowerCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def lowerCAmelCase__ ( self: int ): __lowerCamelCase = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , 
return_tensors="""pt""" ) __lowerCamelCase = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="""pt""" ) __lowerCamelCase = targets["""input_ids"""] __lowerCamelCase = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[62, 30_34, 2, 25_00_04]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_00_01, } , )
80
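A hedged usage sketch of the translation setup those tests exercise; it needs network access to fetch the checkpoint, and the expected ids come from the constants above (EN_CODE = 250004, eos = 2):

from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok(
    [" UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    return_tensors="pt",
)
# source sequences end with </s> followed by the source language code en_XX
print(batch["input_ids"][0][-2:].tolist())  # [2, 250004]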
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet, attn in zip(self.resnets , self.attentions ): __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_downsample: __lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ): __lowerCamelCase = () for resnet in self.resnets: __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: __lowerCamelCase = self.downsamplers_a(UpperCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def 
lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [] __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = True UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = [] for i in range(self.num_layers ): __lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels __lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels __lowerCamelCase = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets if self.add_upsample: __lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ): for resnet in self.resnets: # pop res hidden states __lowerCamelCase = res_hidden_states_tuple[-1] __lowerCamelCase = res_hidden_states_tuple[:-1] __lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) if self.add_upsample: __lowerCamelCase = self.upsamplers_a(UpperCamelCase_ ) return hidden_states class lowerCamelCase__( nn.Module): UpperCAmelCase__ : int UpperCAmelCase__ : float = 0.0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : jnp.dtype = jnp.floataa def lowerCAmelCase__ ( self: int ): # there is always at least one resnet __lowerCamelCase = [ FlaxResnetBlockaD( in_channels=self.in_channels , 
out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] __lowerCamelCase = [] for _ in range(self.num_layers ): __lowerCamelCase = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(UpperCamelCase_ ) __lowerCamelCase = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(UpperCamelCase_ ) __lowerCamelCase = resnets __lowerCamelCase = attentions def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ): __lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): __lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) __lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ ) return hidden_states
80
1
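The up-blocks above pop one residual per resnet from the skip tuple and concatenate it with the running hidden states on the channel axis (channels-last in Flax). A shape-only sketch with made-up sizes:

import jax.numpy as jnp

hidden_states = jnp.zeros((1, 32, 32, 640))
res_hidden_states_tuple = (jnp.zeros((1, 32, 32, 320)), jnp.zeros((1, 32, 32, 640)))

res_hidden_states = res_hidden_states_tuple[-1]       # pop the newest residual
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(hidden_states.shape)  # (1, 32, 32, 1280)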
# # This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run it, first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If the script hangs in `barrier` calls, you likely have network issues; you can debug them with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowerCamelCase__ ( *A__ : Any ): '''simple docstring''' with open(A__ , """r""" ) as fh: fcntl.flock(A__ , fcntl.LOCK_EX ) try: print(*A__ ) finally: fcntl.flock(A__ , fcntl.LOCK_UN ) UpperCAmelCase_ = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) UpperCAmelCase_ = torch.device('cuda', local_rank) UpperCAmelCase_ = socket.gethostname() UpperCAmelCase_ = f"""[{hostname}-{local_rank}]""" try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank UpperCAmelCase_ = dist.get_rank() UpperCAmelCase_ = dist.get_world_size() printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""") dist.barrier() if rank == 0: printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""") except Exception: printflock(f"""{gpu} is broken""") raise
80
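On a machine without GPUs, the same init/all-reduce handshake can be smoke-tested in a single process with the gloo backend; a minimal sketch (the address and port are arbitrary local values):

import os
import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)
t = torch.ones(1)
dist.all_reduce(t, op=dist.ReduceOp.SUM)  # with world_size=1 this is a no-op
assert t.item() == 1.0
dist.destroy_process_group()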
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = ' Hello world! cécé herlolip' UpperCAmelCase_ = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", ] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = dct.pop(A__ ) __lowerCamelCase = val def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' __lowerCamelCase = torch.load(A__ , map_location="""cpu""" ) __lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval() hub_interface.model.load_state_dict(sd["""model"""] ) return hub_interface def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ ) __lowerCamelCase = emb.weight.data return lin_layer @torch.no_grad() def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ): '''simple docstring''' if not os.path.exists(A__ ): __lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval() else: __lowerCamelCase = load_xsum_checkpoint(A__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: __lowerCamelCase = checkpoint_path.replace(""".""" , """-""" ) __lowerCamelCase = BartConfig.from_pretrained(A__ ) __lowerCamelCase = bart.encode(A__ ).unsqueeze(0 ) __lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 ) if not torch.eq(A__ , A__ ).all(): raise ValueError( f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' ) if checkpoint_path == "bart.large.mnli": __lowerCamelCase = bart.state_dict() remove_ignore_keys_(A__ ) __lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""] for src, dest in mnli_rename_keys: rename_key(A__ , A__ , A__ ) __lowerCamelCase = BartForSequenceClassification(A__ ).eval() model.load_state_dict(A__ ) __lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ ) __lowerCamelCase = model(A__ )[0] # logits else: # no classification heads to worry about __lowerCamelCase = bart.model.state_dict() remove_ignore_keys_(A__ ) __lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""] __lowerCamelCase = bart.extract_features(A__ ) if hf_checkpoint_name == "facebook/bart-large": __lowerCamelCase = BartModel(A__ 
).eval() model.load_state_dict(A__ ) __lowerCamelCase = model(A__ ).model[0] else: __lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt model.model.load_state_dict(A__ ) if hasattr(A__ , """lm_head""" ): __lowerCamelCase = make_linear_from_emb(model.model.shared ) __lowerCamelCase = model.model(A__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) UpperCAmelCase_ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
80
1
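The rename_key helper above is the core trick behind the checkpoint conversion: pop an entry out of the state dict and re-insert it under the Transformers name. The same pattern on a plain dict:

state_dict = {"model.classification_heads.mnli.dense.weight": "W"}
old_key = "model.classification_heads.mnli.dense.weight"
new_key = "classification_head.dense.weight"
state_dict[new_key] = state_dict.pop(old_key)
print(state_dict)  # {'classification_head.dense.weight': 'W'}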
from __future__ import annotations from collections.abc import MutableSequence class lowerCamelCase__: def __init__( self: int , UpperCamelCase_: int , UpperCamelCase_: MutableSequence[float] ): if len(UpperCamelCase_ ) != degree + 1: raise ValueError( """The number of coefficients should be equal to the degree + 1.""" ) __lowerCamelCase = list(UpperCamelCase_ ) __lowerCamelCase = degree def __add__( self: Optional[int] , UpperCamelCase_: Polynomial ): if self.degree > polynomial_a.degree: __lowerCamelCase = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , UpperCamelCase_ ) else: __lowerCamelCase = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , UpperCamelCase_ ) def __sub__( self: List[Any] , UpperCamelCase_: Polynomial ): return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self: Dict ): return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self: Tuple , UpperCamelCase_: Polynomial ): __lowerCamelCase = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int | float ): __lowerCamelCase = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self: Any ): __lowerCamelCase = """""" for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i ) return polynomial def __repr__( self: Optional[int] ): return self.__str__() def lowerCAmelCase__ ( self: int ): __lowerCamelCase = [0] * self.degree for i in range(self.degree ): __lowerCamelCase = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int | float = 0 ): __lowerCamelCase = [0] * (self.degree + 2) __lowerCamelCase = constant for i in range(self.degree + 1 ): __lowerCamelCase = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , UpperCamelCase_ ) def __eq__( self: Dict , UpperCamelCase_: object ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self: Optional[int] , UpperCamelCase_: object ): return not self.__eq__(UpperCamelCase_ )
80
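The __mul__ above is plain coefficient convolution. A self-contained restatement with readable names (my own, since the obfuscated cell collapses parameter names and is not importable as-is):

def poly_mul(a: list, b: list) -> list:
    # coefficients ordered low -> high degree, as in the class above
    out = [0] * (len(a) + len(b) - 1)
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            out[i + j] += ai * bj
    return out

print(poly_mul([1, 2], [0, 1]))  # (2x + 1) * x -> [0, 1, 2], i.e. 2x^2 + x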
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class lowerCamelCase__: def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_input_mask __lowerCamelCase = use_labels __lowerCamelCase = use_mc_token_ids __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = self.vocab_size - 1 def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None if self.use_mc_token_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() __lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase__ ( self: Dict ): return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def 
lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ): __lowerCamelCase = CTRLModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ ) model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ): __lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ( __lowerCamelCase ), ) = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = self.num_labels __lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else () UpperCAmelCase__ : int = ( { 'feature-extraction': CTRLModel, 'text-classification': CTRLForSequenceClassification, 'text-generation': CTRLLMHeadModel, 'zero-shot': CTRLForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = False def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = CTRLModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 ) def lowerCAmelCase__ ( self: Optional[int] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase__ ( self: List[Any] ): pass @slow def lowerCAmelCase__ ( self: Optional[Any] ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self: Optional[Any] ): pass @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(UpperCamelCase_ ) __lowerCamelCase = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is __lowerCamelCase = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
80
1
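The tester above fills its inputs with ids_tensor; a minimal stand-in showing what it produces (a sketch, not the actual helper from test_modeling_common):

import torch

def ids_tensor(shape, vocab_size):
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

input_ids = ids_tensor([14, 7], 99)  # batch_size=14, seq_length=7, as in the tester
print(input_ids.shape, input_ids.dtype)  # torch.Size([14, 7]) torch.int64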
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class lowerCamelCase__: UpperCAmelCase__ : List[str] = MBartConfig UpperCAmelCase__ : str = {} UpperCAmelCase__ : int = 'gelu' def __init__( self: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: str=13 , UpperCamelCase_: Union[str, Any]=7 , UpperCamelCase_: Dict=True , UpperCamelCase_: Union[str, Any]=False , UpperCamelCase_: str=99 , UpperCamelCase_: Optional[Any]=32 , UpperCamelCase_: Any=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Union[str, Any]=37 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Tuple=20 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: List[Any]=1 , UpperCamelCase_: Dict=0 , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = bos_token_id def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowerCamelCase = prepare_mbart_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] ): __lowerCamelCase = TFMBartModel(config=UpperCamelCase_ ).get_decoder() __lowerCamelCase = inputs_dict["""input_ids"""] __lowerCamelCase = input_ids[:1, :] __lowerCamelCase = inputs_dict["""attention_mask"""][:1, :] __lowerCamelCase = inputs_dict["""head_mask"""] __lowerCamelCase = 1 # first forward pass __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase = 
outputs.to_tuple() __lowerCamelCase = past_key_values[1] def lowerCamelCase__ ( A__ : Optional[int] , A__ : Dict , A__ : Dict , A__ : Tuple=None , A__ : List[Any]=None , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , ): '''simple docstring''' if attention_mask is None: __lowerCamelCase = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __lowerCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __lowerCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () UpperCAmelCase__ : List[str] = (TFMBartForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase__ : Union[str, Any] = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase__ : Any = True UpperCAmelCase__ : Union[str, Any] = False UpperCAmelCase__ : Union[str, Any] = False def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: str ): if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = TFMBartModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ ) @require_sentencepiece @require_tokenizers @require_tf class lowerCamelCase__( unittest.TestCase): UpperCAmelCase__ : Any = [ ' UN Chief Says There Is No Military Solution in Syria', ] UpperCAmelCase__ : Tuple = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] UpperCAmelCase__ : str = 'facebook/mbart-large-en-ro' @cached_property def lowerCAmelCase__ ( self: Tuple ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase__ ( self: Union[str, Any] , **UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = self.translate_src_text(**UpperCamelCase_ ) self.assertListEqual(self.expected_text , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[int] , **UpperCamelCase_: List[Any] ): __lowerCamelCase = self.tokenizer(self.src_text , **UpperCamelCase_ , return_tensors="""tf""" ) __lowerCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __lowerCamelCase = self.tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) return generated_words @slow def lowerCAmelCase__ ( self: List[Any] ): self._assert_generated_batch_equal_expected()
80
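The prepare_mbart_inputs_dict helper above derives the attention mask from the pad token whenever none is supplied; a small numeric check of that rule:

import tensorflow as tf

pad_token_id = 1  # matches the tester's pad_token_id
input_ids = tf.constant([[5, 7, 1, 1]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 0 0]]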
def lowerCamelCase__ ( A__ : int = 2000000 ): '''simple docstring''' __lowerCamelCase = [0 for i in range(n + 1 )] __lowerCamelCase = 1 __lowerCamelCase = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , A__ ): __lowerCamelCase = 1 __lowerCamelCase = 0 for i in range(A__ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f"""{solution() = }""")
80
1
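The sieve above marks composites starting at each prime's square; entries left at 0 are prime. Worked for n = 10, where the sum of primes below n should be 17:

n = 10
primality_list = [0] * (n + 1)
primality_list[0] = primality_list[1] = 1
for i in range(2, int(n**0.5) + 1):
    if primality_list[i] == 0:
        for j in range(i * i, n + 1, i):
            primality_list[j] = 1
print([i for i in range(n) if primality_list[i] == 0])     # [2, 3, 5, 7]
print(sum(i for i in range(n) if primality_list[i] == 0))  # 17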
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = ['pixel_values'] def __init__( self: Union[str, Any] , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_pad __lowerCamelCase = pad_size def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: List[str] ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ): __lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ ) __lowerCamelCase = (old_height // size + 1) * size - old_height __lowerCamelCase = (old_width // size + 1) * size - old_width return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: int , ): __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_pad if do_pad is not None else self.do_pad __lowerCamelCase = pad_size if pad_size is not None else self.pad_size __lowerCamelCase = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_pad: __lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] __lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
80
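Note that the padding formula above always adds at least one full block: a dimension already divisible by size still grows by size. A quick check of that arithmetic:

size = 8
for old in (60, 64):
    pad = (old // size + 1) * size - old
    print(old, "->", old + pad)  # 60 -> 64, 64 -> 72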
import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : Dict = 1 @register_to_config def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ): # set `betas`, `alphas`, `timesteps` self.set_timesteps(UpperCamelCase_ ) # standard deviation of the initial noise distribution __lowerCamelCase = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. __lowerCamelCase = 4 # running values __lowerCamelCase = [] def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ): __lowerCamelCase = num_inference_steps __lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] __lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: __lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: __lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2 __lowerCamelCase = (1.0 - self.betas**2) ** 0.5 __lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] __lowerCamelCase = timesteps.to(UpperCamelCase_ ) __lowerCamelCase = [] def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ): if self.num_inference_steps is None: raise ValueError( """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" ) __lowerCamelCase = (self.timesteps == timestep).nonzero().item() __lowerCamelCase = timestep_index + 1 __lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(UpperCamelCase_ ) if len(self.ets ) == 1: __lowerCamelCase = self.ets[-1] elif len(self.ets ) == 2: __lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: __lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: __lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) __lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ): return sample def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ): __lowerCamelCase = self.alphas[timestep_index] __lowerCamelCase = self.betas[timestep_index] __lowerCamelCase = self.alphas[prev_timestep_index] __lowerCamelCase = self.betas[prev_timestep_index] __lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 ) __lowerCamelCase = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self: List[Any] ): return self.config.num_train_timesteps
80
1
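Once four outputs are stored, the scheduler above blends them with fixed Adams-Bashforth style weights (55, -59, 37, -9) / 24. With scalar stand-ins for the stored outputs, oldest to newest:

ets = [1.0, 2.0, 3.0, 4.0]  # stand-ins for the last four model outputs
blended = (1 / 24) * (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4])
print(blended)  # 4.5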
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Any = StableUnCLIPPipeline UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase__ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false UpperCAmelCase__ : Optional[int] = False def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = 32 __lowerCamelCase = embedder_hidden_size # prior components torch.manual_seed(0 ) __lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) __lowerCamelCase = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=UpperCamelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __lowerCamelCase = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase_ , num_layers=1 , ) torch.manual_seed(0 ) __lowerCamelCase = DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=UpperCamelCase_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) __lowerCamelCase = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase_ ) __lowerCamelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) __lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) __lowerCamelCase = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) __lowerCamelCase = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase_ , layers_per_block=1 , upcast_attention=UpperCamelCase_ , use_linear_projection=UpperCamelCase_ , ) torch.manual_seed(0 ) __lowerCamelCase = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , 
prediction_type="""v_prediction""" , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , ) torch.manual_seed(0 ) __lowerCamelCase = AutoencoderKL() __lowerCamelCase = { # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[int]=0 ): if str(UpperCamelCase_ ).startswith("""mps""" ): __lowerCamelCase = torch.manual_seed(UpperCamelCase_ ) else: __lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __lowerCamelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase_ ) @slow @require_torch_gpu class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Optional[int] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) __lowerCamelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCamelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCamelCase = pipe("""anime turle""" , generator=UpperCamelCase_ , output_type="""np""" ) __lowerCamelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCamelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __lowerCamelCase = pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) __lowerCamelCase = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
80
import os from collections.abc import Iterator def lowerCamelCase__ ( A__ : str = "." ): '''simple docstring''' for dir_path, dir_names, filenames in os.walk(A__ ): __lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(A__ )[1] in (".py", ".ipynb"): yield os.path.join(A__ , A__ ).lstrip("""./""" ) def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' return f'{i * " "}*' if i else "\n##" def lowerCamelCase__ ( A__ : str , A__ : str ): '''simple docstring''' __lowerCamelCase = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part: print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' ) return new_path def lowerCamelCase__ ( A__ : str = "." ): '''simple docstring''' __lowerCamelCase = """""" for filepath in sorted(good_file_paths(A__ ) ): __lowerCamelCase, __lowerCamelCase = os.path.split(A__ ) if filepath != old_path: __lowerCamelCase = print_path(A__ , A__ ) __lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0 __lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" ) __lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0] print(f'{md_prefix(A__ )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('.')
80
1
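The test class in this record exercises diffusers' StableUnCLIPPipeline end to end. A hedged sketch of the public API the slow test drives, with the model id and the memory-saving calls taken directly from the test body (step counts here are illustrative):

import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.enable_attention_slicing()        # same VRAM savings the test turns on
pipe.enable_sequential_cpu_offload()
image = pipe("anime turtle", num_inference_steps=20, prior_num_inference_steps=20).images[0]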
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=__lowerCamelCase) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True}) UpperCAmelCase__ : ClassVar[Features] = Features({'audio': Audio()}) UpperCAmelCase__ : ClassVar[Features] = Features({'labels': ClassLabel}) UpperCAmelCase__ : str = "audio" UpperCAmelCase__ : str = "labels" def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tuple ): if self.label_column not in features: raise ValueError(F'Column {self.label_column} is not present in features.' ) if not isinstance(features[self.label_column] , UpperCamelCase_ ): raise ValueError(F'Column {self.label_column} is not a ClassLabel.' ) __lowerCamelCase = copy.deepcopy(self ) __lowerCamelCase = self.label_schema.copy() __lowerCamelCase = features[self.label_column] __lowerCamelCase = label_schema return task_template @property def lowerCAmelCase__ ( self: int ): return { self.audio_column: "audio", self.label_column: "labels", }
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list ): '''simple docstring''' if not nums: raise ValueError("""List is empty""" ) return sum(A__ ) / len(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
80
1
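The frozen dataclass opening this record mirrors datasets' AudioClassification task template; the obfuscated method is its align_with_features hook, which swaps the placeholder ClassLabel in label_schema for the dataset's real one, and the final property is its column_mapping. A sketch under those assumed deobfuscated names, using the (deprecated) datasets.tasks API:

from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
task = AudioClassification(audio_column="audio", label_column="labels")
task = task.align_with_features(features)  # copies the real ClassLabel into the schema
print(task.label_schema["labels"].names)   # ['cat', 'dog']
print(task.column_mapping)                 # {'audio': 'audio', 'labels': 'labels'}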
from collections.abc import Generator from math import sin def lowerCamelCase__ ( A__ : bytes ): '''simple docstring''' if len(A__ ) != 32: raise ValueError("""Input must be of length 32""" ) __lowerCamelCase = B"""""" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowerCamelCase__ ( A__ : int ): '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) __lowerCamelCase = format(A__ , """08x""" )[-8:] __lowerCamelCase = B"""""" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" ) return little_endian_hex def lowerCamelCase__ ( A__ : bytes ): '''simple docstring''' __lowerCamelCase = B"""""" for char in message: bit_string += format(A__ , """08b""" ).encode("""utf-8""" ) __lowerCamelCase = format(len(A__ ) , """064b""" ).encode("""utf-8""" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(A__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowerCamelCase__ ( A__ : bytes ): '''simple docstring''' if len(A__ ) % 512 != 0: raise ValueError("""Input must have length that's a multiple of 512""" ) for pos in range(0 , len(A__ ) , 512 ): __lowerCamelCase = bit_string[pos : pos + 512] __lowerCamelCase = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowerCamelCase__ ( A__ : int ): '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) __lowerCamelCase = format(A__ , """032b""" ) __lowerCamelCase = """""" for c in i_str: new_str += "1" if c == "0" else "0" return int(A__ , 2 ) def lowerCamelCase__ ( A__ : int , A__ : int ): '''simple docstring''' return (a + b) % 2**32 def lowerCamelCase__ ( A__ : int , A__ : int ): '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) if shift < 0: raise ValueError("""Shift must be non-negative""" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowerCamelCase__ ( A__ : bytes ): '''simple docstring''' __lowerCamelCase = preprocess(A__ ) __lowerCamelCase = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __lowerCamelCase = 0X67452301 __lowerCamelCase = 0XEFCDAB89 __lowerCamelCase = 0X98BADCFE __lowerCamelCase = 0X10325476 __lowerCamelCase = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(A__ ): __lowerCamelCase = aa __lowerCamelCase = ba __lowerCamelCase = ca __lowerCamelCase = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __lowerCamelCase = d ^ (b & (c ^ d)) __lowerCamelCase = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __lowerCamelCase = c ^ (d & (b ^ c)) __lowerCamelCase = (5 * i + 1) % 16 elif i <= 47: __lowerCamelCase = b ^ c ^ d __lowerCamelCase = (3 * i + 5) % 16 else: __lowerCamelCase = c ^ (b | not_aa(A__ )) __lowerCamelCase = (7 * i) % 16 __lowerCamelCase = (f + a + added_consts[i] + block_words[g]) % 2**32 __lowerCamelCase = d __lowerCamelCase = c __lowerCamelCase = b __lowerCamelCase = sum_aa(A__ , left_rotate_aa(A__ , shift_amounts[i] ) ) # Add hashed chunk to running total 
__lowerCamelCase = sum_aa(A__ , A__ ) __lowerCamelCase = sum_aa(A__ , A__ ) __lowerCamelCase = sum_aa(A__ , A__ ) __lowerCamelCase = sum_aa(A__ , A__ ) __lowerCamelCase = reformat_hex(A__ ) + reformat_hex(A__ ) + reformat_hex(A__ ) + reformat_hex(A__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
80
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : Any = 'maskformer-swin' UpperCAmelCase__ : List[Any] = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = num_channels __lowerCamelCase = embed_dim __lowerCamelCase = depths __lowerCamelCase = len(UpperCamelCase_ ) __lowerCamelCase = num_heads __lowerCamelCase = window_size __lowerCamelCase = mlp_ratio __lowerCamelCase = qkv_bias __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = drop_path_rate __lowerCamelCase = hidden_act __lowerCamelCase = use_absolute_embeddings __lowerCamelCase = layer_norm_eps __lowerCamelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) ) __lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )] __lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
80
1
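The record's first snippet is a from-scratch MD5 (sine-table constants, the four chaining words 0x67452301/0xEFCDAB89/0x98BADCFE/0x10325476, and the standard per-round shift table). Since the dump obfuscates the entry-point name, here is a standard-library cross-check of the digests a correct deobfuscation should reproduce:

import hashlib

assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"
msg = b"The quick brown fox jumps over the lazy dog"
assert hashlib.md5(msg).hexdigest() == "9e107d9d372bb6826bd81d3542a419d6"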
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class lowerCamelCase__: UpperCAmelCase__ : int UpperCAmelCase__ : int class lowerCamelCase__: def __init__( self: Dict , UpperCamelCase_: int ): __lowerCamelCase = [[] for _ in range(UpperCamelCase_ )] __lowerCamelCase = size def __getitem__( self: Optional[int] , UpperCamelCase_: int ): return iter(self._graph[vertex] ) @property def lowerCAmelCase__ ( self: Any ): return self._size def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int ): if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int ): __lowerCamelCase = deque([start_vertex] ) __lowerCamelCase = [None] * self.size __lowerCamelCase = 0 while queue: __lowerCamelCase = queue.popleft() __lowerCamelCase = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: __lowerCamelCase = current_distance + edge.weight __lowerCamelCase = distances[edge.destination_vertex] if ( isinstance(UpperCamelCase_ , UpperCamelCase_ ) and new_distance >= dest_vertex_distance ): continue __lowerCamelCase = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
80
from __future__ import annotations def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): __lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa] def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if length > 1: __lowerCamelCase = int(length / 2 ) for i in range(A__ , low + middle ): comp_and_swap(A__ , A__ , i + middle , A__ ) bitonic_merge(A__ , A__ , A__ , A__ ) bitonic_merge(A__ , low + middle , A__ , A__ ) def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if length > 1: __lowerCamelCase = int(length / 2 ) bitonic_sort(A__ , A__ , A__ , 1 ) bitonic_sort(A__ , low + middle , A__ , 0 ) bitonic_merge(A__ , A__ , A__ , A__ ) if __name__ == "__main__": UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
80
1
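The graph class in this record implements 0-1 BFS: weight-0 edges are pushed to the front of the deque (same distance) and weight-1 edges to the back (distance + 1), giving shortest paths on 0/1-weighted graphs in O(V + E). A small demo; `AdjacencyList`, `add_edge` and `get_shortest_path` are assumed deobfuscations of the class and method names the dump hides:

g = AdjacencyList(5)               # 5 vertices; class name assumed
g.add_edge(0, 1, 0)                # weight-0 edge: explored via appendleft
g.add_edge(0, 2, 1)
g.add_edge(1, 3, 1)
g.add_edge(2, 4, 0)
print(g.get_shortest_path(0, 4))   # 1  (0 -> 2 costs 1, 2 -> 4 costs 0)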
def lowerCamelCase__ ( A__ : list ): '''simple docstring''' for i in range(len(A__ ) - 1 , 0 , -1 ): __lowerCamelCase = False for j in range(A__ , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: __lowerCamelCase, __lowerCamelCase = unsorted[j - 1], unsorted[j] __lowerCamelCase = True for j in range(A__ ): if unsorted[j] > unsorted[j + 1]: __lowerCamelCase, __lowerCamelCase = unsorted[j + 1], unsorted[j] __lowerCamelCase = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase_ = [int(item) for item in user_input.split(',')] print(f"""{cocktail_shaker_sort(unsorted) = }""")
80
from ... import PretrainedConfig UpperCAmelCase_ = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP UpperCAmelCase__ : Dict = 'nezha' def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = max_relative_position __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = classifier_dropout __lowerCamelCase = use_cache
80
1
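The sort opening this record is a cocktail shaker (bidirectional bubble) sort: each outer pass first bubbles small elements leftwards, then large elements rightwards, and stops early once a full pass makes no swap. A doctest-style check, assuming the obfuscated def is the cocktail_shaker_sort its own __main__ f-string names:

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 11, 0, 5, 2]) == [-4, 0, 2, 5, 11]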
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) UpperCAmelCase_ = OrderedDict( [ ('vision-encoder-decoder', 
'FlaxVisionEncoderDecoderModel'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) UpperCAmelCase_ = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) UpperCAmelCase_ = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) UpperCAmelCase_ = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Any = FLAX_MODEL_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModel) class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : List[str] = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Optional[int] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : List[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Union[str, Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Any = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : int = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( 
FlaxAutoModelForImageClassification, head_doc='image classification' ) class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class lowerCamelCase__( _BaseAutoModelClass): UpperCAmelCase__ : Any = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
80
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__: def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ): if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ): if self.new_user_input: if overwrite: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' F'with: "{text}".' ) __lowerCamelCase = text else: logger.warning( F'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: __lowerCamelCase = text def lowerCAmelCase__ ( self: List[str] ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ): self.generated_responses.append(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self: Union[str, Any] ): __lowerCamelCase = F'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): __lowerCamelCase = """user""" if is_user else """bot""" output += F'{name} >> {text} \n' return output @add_end_docstrings( __lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ): __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: 
__lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(UpperCamelCase_ ) return preprocess_params, forward_params, postprocess_params def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ): __lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1: return outputs[0] return outputs def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError("""ConversationalPipeline expects a Conversation as input""" ) if conversation.new_user_input is None: raise ValueError( F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ' """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ): __lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length ) __lowerCamelCase = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:] __lowerCamelCase = model_inputs.pop("""conversation""" ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ): __lowerCamelCase = model_outputs["""output_ids"""] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , ) __lowerCamelCase = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(UpperCamelCase_ ) return conversation def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ): __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) ) if len(UpperCamelCase_ ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
80
1
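This record pairs the Flax auto-model registry with transformers' ConversationalPipeline; the registry resolves names through the mappings shown (e.g. FlaxAutoModel.from_pretrained on a BERT checkpoint yields a FlaxBertModel). A short sketch of the public conversational API the obfuscated classes correspond to; the model id is illustrative:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)          # preprocess -> generate -> postprocess, as above
print(conversation.generated_responses[-1])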
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : int=False ): '''simple docstring''' __lowerCamelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def lowerCamelCase__ ( A__ : Any , A__ : List[str] , A__ : Tuple=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: __lowerCamelCase = """""" else: __lowerCamelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) __lowerCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase = in_proj_weight[ : config.hidden_size, : ] __lowerCamelCase = in_proj_bias[: config.hidden_size] __lowerCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase = in_proj_weight[ -config.hidden_size :, : ] 
__lowerCamelCase = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' __lowerCamelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__ ) def lowerCamelCase__ ( A__ : List[str] , A__ : str , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = dct.pop(A__ ) __lowerCamelCase = val def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = ViTConfig() __lowerCamelCase = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": __lowerCamelCase = True __lowerCamelCase = int(vit_name[-12:-10] ) __lowerCamelCase = int(vit_name[-9:-6] ) else: __lowerCamelCase = 1000 __lowerCamelCase = """huggingface/label-files""" __lowerCamelCase = """imagenet-1k-id2label.json""" __lowerCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) ) __lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = int(vit_name[-6:-4] ) __lowerCamelCase = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): __lowerCamelCase = 192 __lowerCamelCase = 768 __lowerCamelCase = 12 __lowerCamelCase = 3 elif vit_name[9:].startswith("""small""" ): __lowerCamelCase = 384 __lowerCamelCase = 1536 __lowerCamelCase = 12 __lowerCamelCase = 6 else: pass else: if vit_name[4:].startswith("""small""" ): __lowerCamelCase = 768 __lowerCamelCase = 2304 __lowerCamelCase = 8 __lowerCamelCase = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): __lowerCamelCase = 1024 __lowerCamelCase = 4096 __lowerCamelCase = 24 __lowerCamelCase = 16 elif vit_name[4:].startswith("""huge""" ): __lowerCamelCase = 1280 __lowerCamelCase = 5120 __lowerCamelCase = 32 __lowerCamelCase = 16 # load original model from timm __lowerCamelCase = timm.create_model(A__ , pretrained=A__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys __lowerCamelCase = timm_model.state_dict() if base_model: remove_classification_head_(A__ ) __lowerCamelCase = create_rename_keys(A__ , A__ ) for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) read_in_q_k_v(A__ , A__ , A__ ) # load HuggingFace model if vit_name[-5:] == "in21k": __lowerCamelCase = ViTModel(A__ ).eval() else: __lowerCamelCase = ViTForImageClassification(A__ ).eval() model.load_state_dict(A__ ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: __lowerCamelCase = DeiTImageProcessor(size=config.image_size ) else: __lowerCamelCase = ViTImageProcessor(size=config.image_size ) __lowerCamelCase = image_processor(images=prepare_img() , return_tensors="""pt""" ) __lowerCamelCase = encoding["""pixel_values"""] __lowerCamelCase = model(A__ ) if base_model: __lowerCamelCase = timm_model.forward_features(A__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(A__ , outputs.pooler_output , atol=1E-3 ) else: __lowerCamelCase = timm_model(A__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1E-3 ) 
Path(A__ ).mkdir(exist_ok=A__ ) print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(A__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) UpperCAmelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
80
import math def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = 2 __lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment __lowerCamelCase = [True] * (end + 1) __lowerCamelCase = [] while start <= end: if temp[start] is True: in_prime.append(A__ ) for i in range(start * start , end + 1 , A__ ): __lowerCamelCase = False start += 1 prime += in_prime __lowerCamelCase = end + 1 __lowerCamelCase = min(2 * end , A__ ) while low <= n: __lowerCamelCase = [True] * (high - low + 1) for each in in_prime: __lowerCamelCase = math.floor(low / each ) * each if t < low: t += each for j in range(A__ , high + 1 , A__ ): __lowerCamelCase = False for j in range(len(A__ ) ): if temp[j] is True: prime.append(j + low ) __lowerCamelCase = high + 1 __lowerCamelCase = min(high + end , A__ ) return prime print(sieve(10**6))
80
1
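The sieve closing this record is a segmented Sieve of Eratosthenes: it first sieves [2, sqrt(n)], then marks composites window by window in segments of width ~sqrt(n), so peak memory stays O(sqrt(n)) instead of O(n). A sanity check under the assumed (obfuscated) name `sieve`; the companion conversion script in the same record is driven from the CLI per its argparse block, e.g. `python convert.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit` (script filename assumed):

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]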