Dataset schema (one record per row; string columns report min/max lengths, integer columns report min/max values):

    column                    type      min    max
    code                      string    86     54.5k
    code_codestyle            int64     0      371
    style_context             string    87     49.2k
    style_context_codestyle   int64     0      349
    label                     int64     0      1
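Each record below carries these five fields in schema order. As a reading aid, here is a minimal sketch of one row as a typed record in Python; the `DatasetRow` name and the per-field comments are illustrative glosses of the schema above, not taken from the dump itself:

from typing import TypedDict


class DatasetRow(TypedDict):
    code: str                     # a source file (length 86 to ~54.5k characters)
    code_codestyle: int           # style id of `code` (0-371)
    style_context: str            # a second source file serving as the style reference
    style_context_codestyle: int  # style id of `style_context` (0-349)
    label: int                    # binary label (0 or 1)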
code:

import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
code_codestyle: 304
style_context:

from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
style_context_codestyle: 304
label: 1
code:

from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
code_codestyle: 304
style_context:

import unittest

from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
style_context_codestyle: 304
label: 1
code:

import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
code_codestyle: 304
style_context:

from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
style_context_codestyle: 304
label: 1
code:

from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
code_codestyle: 304
style_context:

from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
style_context_codestyle: 304
label: 1
code:

import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample,
        timestep,
    ):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps,
        device=None,
        num_train_timesteps=None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output,
        timestep,
        sample,
        return_dict=True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples,
        noise,
        timesteps,
    ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
code_codestyle: 304
style_context:

from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
style_context_codestyle: 304
label: 1
code:

import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"],
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
code_codestyle: 304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' __lowercase= create_tensor(lowercase__ ) __lowercase= gather(lowercase__ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' __lowercase= [state.process_index] __lowercase= gather_object(lowercase__ ) assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}' assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}' def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' __lowercase= create_tensor(lowercase__ ) __lowercase= broadcast(lowercase__ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def _lowerCamelCase( lowercase__ ) -> List[Any]: '''simple docstring''' if state.is_main_process: __lowercase= torch.arange(state.num_processes + 1 ).to(state.device ) else: __lowercase= torch.arange(state.num_processes ).to(state.device ) __lowercase= pad_across_processes(lowercase__ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def _lowerCamelCase( lowercase__ ) -> Any: '''simple docstring''' if state.num_processes != 2: return __lowercase= create_tensor(lowercase__ ) __lowercase= reduce(lowercase__ , 'sum' ) __lowercase= torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}' def _lowerCamelCase( lowercase__ ) -> Union[str, Any]: '''simple docstring''' if state.num_processes != 2: return __lowercase= create_tensor(lowercase__ ) __lowercase= reduce(lowercase__ , 'mean' ) __lowercase= torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}' def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' main() def _lowerCamelCase( ) -> List[str]: '''simple docstring''' __lowercase= PartialState() state.print(F'State: {state}' ) state.print('testing gather' ) test_gather(lowercase__ ) state.print('testing gather_object' ) test_gather_object(lowercase__ ) state.print('testing broadcast' ) test_broadcast(lowercase__ ) state.print('testing pad_across_processes' ) test_pad_across_processes(lowercase__ ) state.print('testing reduce_sum' ) test_reduce_sum(lowercase__ ) state.print('testing reduce_mean' ) 
test_reduce_mean(lowercase__ ) if __name__ == "__main__": main()
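
# Self-contained sketch of what one of these checks exercises. With a single
# process the collectives are pass-throughs; launched via
# `accelerate launch --num_processes 2 demo.py` (script name assumed), `gather`
# concatenates one value per rank on every process.
import torch
from accelerate import PartialState
from accelerate.utils.operations import gather

state = PartialState()
tensor = (torch.arange(state.num_processes) + 1.0 + state.num_processes * state.process_index).to(state.device)
print(gather(tensor))  # 1 process: tensor([1.]); 2 processes: tensor([1., 2.]) on each rank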
304
1
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } lowerCAmelCase = { '''AI-Sweden/gpt-sw3-126m''': 2_0_4_8, '''AI-Sweden/gpt-sw3-350m''': 2_0_4_8, '''AI-Sweden/gpt-sw3-1.6b''': 2_0_4_8, '''AI-Sweden/gpt-sw3-6.7b''': 2_0_4_8, '''AI-Sweden/gpt-sw3-20b''': 2_0_4_8, } class A ( A_ ): UpperCamelCase_ : Tuple =VOCAB_FILES_NAMES UpperCamelCase_ : Tuple =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : List[Any] =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs __lowercase= kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) __lowercase= 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __lowercase= '<|endoftext|>' if eos_token is None else eos_token __lowercase= '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __lowercase= unk_token if pad_token is None else pad_token __lowercase= eos_token if bos_token is None else bos_token else: __lowercase= '<pad>' if pad_token is None else pad_token __lowercase= '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) __lowercase= do_lower_case __lowercase= remove_space __lowercase= keep_accents __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # Used for whitespace normalization in input texts # fmt : off __lowercase= {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __lowercase= re.compile( f'[{"".join(map(lowerCAmelCase , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' ) def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase= {} __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _A (self ): return len(self.sp_model ) def _A (self , lowerCAmelCase ): __lowercase= self.non_printing_characters_re.sub('' , lowerCAmelCase ) # Normalize whitespaces __lowercase= ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization __lowercase= unicodedata.normalize('NFC' , lowerCAmelCase ) return text def _A (self , lowerCAmelCase , **lowerCAmelCase ): __lowercase= self.preprocess_text(lowerCAmelCase ) return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase ) def _A (self , lowerCAmelCase ): return self.sp_model.PieceToId(lowerCAmelCase ) def _A (self , lowerCAmelCase ): return self.sp_model.IdToPiece(lowerCAmelCase ) @staticmethod def _A (lowerCAmelCase ): return out_string def _A (self , lowerCAmelCase ): __lowercase= [] __lowercase= '' __lowercase= False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase ) + token __lowercase= True __lowercase= [] else: current_sub_tokens.append(lowerCAmelCase ) __lowercase= False out_string += self.sp_model.decode(lowerCAmelCase ) return out_string def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase , 'wb' ) as fi: __lowercase= self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,) def _A (self , lowerCAmelCase , lowerCAmelCase = False ): if isinstance(lowerCAmelCase , lowerCAmelCase ): __lowercase= self.preprocess_text(lowerCAmelCase ) __lowercase= self.sp_model.encode(lowerCAmelCase ) else: __lowercase= [self.preprocess_text(lowerCAmelCase ) for t in text] __lowercase= self.sp_model.encode(lowerCAmelCase ) if return_tensors is True or return_tensors == "pt": __lowercase= torch.tensor(lowerCAmelCase ) return token_ids def _A (self , lowerCAmelCase ): return self.sp_model.decode(lowerCAmelCase ) def _A (self , lowerCAmelCase ): __lowercase= [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()] __lowercase= ( 
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowerCAmelCase ) + f'{self.bos_token}Bot:' ) return self.encode(text=lowerCAmelCase )
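
# The preprocessing step above is self-contained enough to demonstrate on its own.
# A minimal standalone sketch: drop non-printing control characters, map exotic
# whitespace to a plain space, then NFC-normalize. The whitespace set below is an
# assumed subset of the full one in the tokenizer.
import re
import unicodedata

non_printing_re = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)
whitespaces = {"\u00a0", "\u2009", "\u200a", "\u202f", "\u2028", "\u2029"}  # assumed subset

def preprocess_text(text: str) -> str:
    text = non_printing_re.sub("", text)
    text = "".join(ch if ch not in whitespaces else " " for ch in text)
    return unicodedata.normalize("NFC", text)

print(repr(preprocess_text("a\u2009b\x07c")))  # -> 'a bc' (thin space normalized, BEL stripped)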
304
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class A ( A_ ): UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor class A ( A_ , A_ ): UpperCamelCase_ : Dict =1 @register_to_config def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ): # standard deviation of the initial noise distribution __lowercase= sigma_max # setable values __lowercase= None self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): return sample def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps __lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sigma_min if sigma_min is not None else self.config.sigma_min __lowercase= sigma_max if sigma_max is not None else self.config.sigma_max __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCAmelCase , lowerCAmelCase ) __lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) ) __lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) __lowercase= timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) __lowercase= (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __lowercase= timesteps.to(self.discrete_sigmas.device ) __lowercase= self.discrete_sigmas[timesteps].to(sample.device ) __lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device ) __lowercase= torch.zeros_like(lowerCAmelCase ) __lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __lowercase= diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): __lowercase= diffusion.unsqueeze(-1 ) __lowercase= drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __lowercase= randn_tensor( sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype ) __lowercase= sample - drift # subtract 
because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction __lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr __lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __lowercase= step_size.flatten() while len(step_size.shape ) < len(sample.shape ): __lowercase= step_size.unsqueeze(-1 ) __lowercase= sample + step_size * model_output __lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowercase= timesteps.to(original_samples.device ) __lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps] __lowercase= ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None] ) __lowercase= noise + original_samples return noisy_samples def __len__(self ): return self.config.num_train_timesteps
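
# Minimal predictor-corrector sampling loop against the scheduler above, as a
# hedged sketch: the zero "score" is a stand-in for a trained network, so the
# output stays noise; step count and tensor shape are illustrative assumptions.
import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)

sample = torch.randn(1, 3, 32, 32) * scheduler.config.sigma_max
for t in scheduler.timesteps:
    score = torch.zeros_like(sample)                            # model_output stub
    sample = scheduler.step_correct(score, sample).prev_sample  # Langevin corrector
    sample = scheduler.step_pred(score, t, sample).prev_sample  # reverse-SDE predictor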
304
1
from __future__ import annotations from collections.abc import Callable def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float: '''simple docstring''' __lowercase= x_start __lowercase= fnc(lowercase__ ) __lowercase= 0.0 for _ in range(lowercase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area __lowercase= (x_end - x_start) / steps + xa __lowercase= fnc(lowercase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step __lowercase= xa __lowercase= fxa return area if __name__ == "__main__": def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' return x**3 + x**2 print('''f(x) = x^3 + x^2''') print('''The area between the curve, x = -5, x = 5 and the x axis is:''') lowerCAmelCase = 1_0 while i <= 1_0_0_0_0_0: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 1_0
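
# Hedged quick check for the trapezoidal rule above, re-binding the obfuscated
# name the way the demo block does; the exact integral of x^2 over [0, 3] is 9.
trapezoidal_area = _lowerCamelCase  # name binding assumed from the demo block
print(trapezoidal_area(lambda x: x * x, 0, 3, 1000))  # ~9.0 (slight overestimate for convex f)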
304
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCAmelCase = False class A ( unittest.TestCase ): pass @nightly @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase ) __lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= generator.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 'cyberpunk 2077' __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= 'A painting of a squirrel eating a burger ' __lowercase= torch.manual_seed(0 ) __lowercase= pipe.text_to_image( prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
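
# Generic form of the reproducibility check used in the test above, as a sketch:
# the same seed must give numerically identical outputs across a round trip
# (the callable interface and tolerance are assumptions for illustration).
import numpy as np
import torch

def runs_match(generate, tol=1e-5):
    """Run a generation callable twice with identical seeds and compare outputs."""
    first = generate(torch.manual_seed(0))
    second = generate(torch.manual_seed(0))
    return np.abs(first - second).sum() < tol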
304
1
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_lengths __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= gelu_activation __lowercase= sinusoidal_embeddings __lowercase= causal __lowercase= asm __lowercase= n_langs __lowercase= vocab_size __lowercase= n_special __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= summary_type __lowercase= use_proj __lowercase= scope __lowercase= bos_token_id def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None if self.use_input_lengths: __lowercase= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , 2 ).float() __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A (self ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) __lowercase= outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , ) ((__lowercase), )= result_with_labels.to_tuple() __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) ((__lowercase), )= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase 
) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_labels __lowercase= XLMForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_choices __lowercase= XLMForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : int =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase_ : Dict =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCamelCase_ : str =( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= XLMModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= min_length + idx + 1 __lowercase= ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , ) pass @slow def _A (self ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= 
XLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president __lowercase= [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
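
# Hypothetical end-to-end version of the slow generation test above; downloads
# the public checkpoint, and the same output-quality caveat from the TODO applies.
import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = tokenizer("the president", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))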
304
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

lowerCAmelCase = {
    '''configuration_xmod''': [
        '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XmodConfig''',
        '''XmodOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase = [
        '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XmodForCausalLM''',
        '''XmodForMaskedLM''',
        '''XmodForMultipleChoice''',
        '''XmodForQuestionAnswering''',
        '''XmodForSequenceClassification''',
        '''XmodForTokenClassification''',
        '''XmodModel''',
        '''XmodPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
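
# Illustration of what the lazy structure above buys: `import transformers` stays
# cheap, and the torch-backed symbols only materialize on first attribute access.
# Assumes a transformers version that ships the Xmod model.
import transformers

config_cls = getattr(transformers, "XmodConfig")  # triggers the lazy import of just this symbol
print(config_cls().model_type)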
304
1
import math
from datetime import datetime, timedelta


def _lowerCamelCase( lowercase__ ) -> datetime:
    '''simple docstring'''
    __lowercase= year % 1_9
    __lowercase= year % 4
    __lowercase= year % 7
    __lowercase= math.floor(year / 1_0_0 )
    __lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    __lowercase= leap_day_inhibits / 4
    __lowercase= (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    __lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    __lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    __lowercase= (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_8 )
    else:
        return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


if __name__ == "__main__":
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
        print(F'Easter in {year} {tense} {gauss_easter(year)}')
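
# Hedged sanity check for the computus above against published Western Easter
# dates; assumes the obfuscated function is re-bound to the readable name its
# demo block uses.
from datetime import datetime

gauss_easter = _lowerCamelCase  # name binding assumed from the demo block
assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2023) == datetime(2023, 4, 9)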
304
import math
from datetime import datetime, timedelta


def _lowerCamelCase( lowercase__ ) -> datetime:
    '''simple docstring'''
    __lowercase= year % 1_9
    __lowercase= year % 4
    __lowercase= year % 7
    __lowercase= math.floor(year / 1_0_0 )
    __lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    __lowercase= leap_day_inhibits / 4
    __lowercase= (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    __lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    __lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    __lowercase= (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_8 )
    else:
        return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


if __name__ == "__main__":
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
        print(F'Easter in {year} {tense} {gauss_easter(year)}')
304
1
import os


def _lowerCamelCase( ) -> str:
    '''simple docstring'''
    with open(os.path.dirname(lowercase__ ) + '/grid.txt' ) as f:
        __lowercase= []  # noqa: E741
        for _ in range(2_0 ):
            l.append([int(lowercase__ ) for x in f.readline().split()] )
    __lowercase= 0
    # right
    for i in range(2_0 ):
        for j in range(1_7 ):
            __lowercase= l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                __lowercase= temp
    # down
    for i in range(1_7 ):
        for j in range(2_0 ):
            __lowercase= l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                __lowercase= temp
    # diagonal 1
    for i in range(1_7 ):
        for j in range(1_7 ):
            __lowercase= l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                __lowercase= temp
    # diagonal 2
    for i in range(1_7 ):
        for j in range(3 , 2_0 ):
            __lowercase= l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                __lowercase= temp
    return maximum


if __name__ == "__main__":
    print(solution())
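
# The same four-direction scan on a tiny self-contained 4x4 grid (window length 4),
# so no grid.txt file is needed; with only one full row per direction, each
# direction contributes a single product.
grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 1, 2, 3],
    [4, 5, 6, 7],
]
best = 0
for i in range(4):
    best = max(best, grid[i][0] * grid[i][1] * grid[i][2] * grid[i][3])  # rows
for j in range(4):
    best = max(best, grid[0][j] * grid[1][j] * grid[2][j] * grid[3][j])  # columns
best = max(best, grid[0][0] * grid[1][1] * grid[2][2] * grid[3][3])      # main diagonal
best = max(best, grid[0][3] * grid[1][2] * grid[2][1] * grid[3][0])      # anti-diagonal
print(best)  # 1680 == 5 * 6 * 7 * 8, the second row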
304
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''blenderbot-small''' UpperCamelCase_ : Optional[Any] =['''past_key_values'''] UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ): __lowercase= vocab_size __lowercase= max_position_embeddings __lowercase= d_model __lowercase= encoder_ffn_dim __lowercase= encoder_layers __lowercase= encoder_attention_heads __lowercase= decoder_ffn_dim __lowercase= decoder_layers __lowercase= decoder_attention_heads __lowercase= dropout __lowercase= attention_dropout __lowercase= activation_dropout __lowercase= activation_function __lowercase= init_std __lowercase= encoder_layerdrop __lowercase= decoder_layerdrop __lowercase= use_cache __lowercase= encoder_layers __lowercase= scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , ) class A ( A_ ): @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase= {0: 'batch'} __lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __lowercase= {0: 'batch', 1: 'decoder_sequence'} __lowercase= {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
__lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} else: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super().outputs else: __lowercase= super(lowerCAmelCase , self ).outputs if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Generate decoder inputs __lowercase= seq_length if not self.use_past else 1 __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __lowercase= dict(**lowerCAmelCase , **lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape __lowercase= common_inputs['decoder_input_ids'].shape[1] __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= decoder_seq_length + 3 __lowercase= ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase= torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 ) __lowercase= [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase, __lowercase= self.num_layers __lowercase= min(lowerCAmelCase , lowerCAmelCase ) __lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers __lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), ) ) # TODO: test this. 
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowerCAmelCase , lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowercase= seqlen + 2 __lowercase, __lowercase= self.num_layers __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= common_inputs['attention_mask'].dtype __lowercase= torch.cat( [common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 ) __lowercase= [ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase ) ] return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase ) __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: __lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) elif self.task == "causal-lm": __lowercase= self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) else: __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: __lowercase= 
super(lowerCAmelCase , self )._flatten_past_key_values_( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
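
# Hypothetical use of the ONNX config above to build dummy export inputs. The
# import path for the (here obfuscated) config class is an assumption based on
# this file's location in transformers; requires the tokenizer checkpoint.
from transformers import AutoTokenizer, BlenderbotSmallConfig
from transformers.models.blenderbot_small.configuration_blenderbot_small import BlenderbotSmallOnnxConfig

config = BlenderbotSmallConfig()
onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework="pt")
print(sorted(dummy.keys()))  # input_ids / attention_mask plus their decoder_* counterparts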
304
1
def _lowerCamelCase( lowercase__ ) -> str:
    '''simple docstring'''
    __lowercase= 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    __lowercase= ''
    __lowercase= ''

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(lowercase__ ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    __lowercase, __lowercase= 0, 0

    # length[i] shows the length of palindromic substring with center i
    __lowercase= [1 for i in range(len(lowercase__ ) )]

    # for each character in new_string find corresponding palindromic string
    __lowercase= 0
    for j in range(len(lowercase__ ) ):
        __lowercase= 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(lowercase__ )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        __lowercase= 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            __lowercase= j - k + 1  # noqa: E741
            __lowercase= j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            __lowercase= length[j]
            __lowercase= j

    # create that string
    __lowercase= new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
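
# Usage sketch for the Manacher implementation above, keeping the sample's
# obfuscated function name (paste directly below the definition).
longest_palindrome = _lowerCamelCase  # name binding assumed
print(longest_palindrome("forgeeksskeegfor"))  # -> geeksskeeg
print(longest_palindrome("abacab"))            # -> bacab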
304
from math import factorial, radians


def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
    '''simple docstring'''
    __lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    __lowercase= radians(lowercase__ )

    __lowercase= angle_in_radians
    __lowercase= 3
    __lowercase= -1

    for _ in range(lowercase__ ):
        result += (b * (angle_in_radians**a)) / factorial(lowercase__ )

        __lowercase= -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(lowercase__ , lowercase__ )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
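
# Cross-check of the truncated Maclaurin series above against math.sin, using the
# sample's obfuscated function name (paste directly below the definition).
import math

print(_lowerCamelCase(30))                     # 0.5 after rounding to 10 places
print(round(math.sin(math.radians(30)), 10))   # 0.5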
304
1
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Any =ReformerTokenizer UpperCamelCase_ : int =ReformerTokenizerFast UpperCamelCase_ : List[str] =True UpperCamelCase_ : Optional[Any] =False UpperCamelCase_ : int =True def _A (self ): super().setUp() __lowercase= ReformerTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _A (self ): __lowercase= '<s>' __lowercase= 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase ) def _A (self ): __lowercase= list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(lowerCAmelCase ) , 1_0_0_0 ) def _A (self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def _A (self ): if not self.test_rust_tokenizer: return __lowercase= self.get_tokenizer() __lowercase= self.get_rust_tokenizer() __lowercase= 'I was born in 92000, and this is falsé.' __lowercase= tokenizer.tokenize(lowerCAmelCase ) __lowercase= rust_tokenizer.tokenize(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowercase= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) __lowercase= rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowercase= self.get_rust_tokenizer() __lowercase= tokenizer.encode(lowerCAmelCase ) __lowercase= rust_tokenizer.encode(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def _A (self , lowerCAmelCase=1_5 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __lowercase= self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase ) # Simple input __lowercase= 'This is a simple input' __lowercase= ['This is a simple input 1', 'This is a simple input 2'] __lowercase= ('This is a simple input', 'This is a pair') __lowercase= [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' ) # Simple input self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' ) # Simple input self.assertRaises( lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' , ) # Pair input self.assertRaises(lowerCAmelCase , tokenizer_r.encode , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' ) # Pair input self.assertRaises(lowerCAmelCase , tokenizer_r.encode_plus , lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' ) # Pair input self.assertRaises( lowerCAmelCase , tokenizer_r.batch_encode_plus , lowerCAmelCase , 
max_length=lowerCAmelCase , padding='max_length' , ) def _A (self ): pass def _A (self ): __lowercase= ReformerTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase ) __lowercase= tokenizer.tokenize('This is a test' ) self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) __lowercase= tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __lowercase= tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) __lowercase= tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def _A (self ): return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' ) @slow def _A (self ): __lowercase= 'Hello World!' __lowercase= [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7] self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) ) @slow def _A (self ): __lowercase= ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) __lowercase= [ 1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 3_5, 2_8, 2_7_5, 3, 2_5_9, 2_9_7, 2_6_0, 8_4, 4, 3_5, 1_1_0, 4_4, 8, 2_5_9, 9_1, 2_6_8, 2_1, 1_1, 2_0_9, 2_7_4, 1_0_9, 2_6_6, 2_7_7, 1_1_7, 8_6, 9_3, 3_1_5, 2_5_8, 2_7_8, 2_5_8, 2_7_7, 2_5_8, 0, 2_5_8, 2_8_8, 2_5_8, 3_1_9, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 2_8_7, 2_5_8, 3_1_5, 2_5_8, 2_8_9, 2_5_8, 2_7_8, 9_9, 2_6_9, 2_6_6, 2_6_2, 8, 2_5_9, 2_4_1, 4, 2_1_7, 2_3_0, 2_6_8, 2_6_6, 5_5, 1_6_8, 1_0_6, 7_5, 1_9_3, 2_6_6, 2_2_3, 2_7, 4_9, 2_6, 2_8_2, 2_5, 2_6_4, 2_9_9, 1_9, 2_6, 0, 2_5_8, 2_7_7, 1_1_7, 8_6, 9_3, 1_7_6, 1_8_3, 2_7_0, 1_1, 2_6_2, 4_2, 6_1, 2_6_5, ] self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) ) @require_torch @slow def _A (self ): import torch from transformers import ReformerConfig, ReformerModel # Build sequence __lowercase= list(self.big_tokenizer.get_vocab().keys() )[:1_0] __lowercase= ' '.join(lowerCAmelCase ) __lowercase= self.big_tokenizer.encode_plus(lowerCAmelCase , return_tensors='pt' ) __lowercase= self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' ) __lowercase= ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) __lowercase= encoded_sequence['input_ids'].shape __lowercase= ReformerModel(lowerCAmelCase ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowerCAmelCase ) model(**lowerCAmelCase ) @slow def _A (self ): # fmt: off __lowercase= {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 __lowercase= [ 'This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.', ] self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=lowerCAmelCase , sequences=lowerCAmelCase , )
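
# Hypothetical quick check of the integration expectation exercised above;
# downloads the public checkpoint and should reproduce the slow test's ids.
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
print(tokenizer.encode("Hello World!"))  # expected: [126, 32, 262, 152, 38, 72, 287]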
304
lowerCAmelCase = [
    9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6,
    2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0,
]

lowerCAmelCase = [
    9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9,
    4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0,
]

lowerCAmelCase = [
    9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0,
    8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9,
    3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7,
    6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0,
]

lowerCAmelCase = [
    9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6,
    9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2,
    8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0,
    8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3,
    7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3,
    6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0,
    4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8,
    4_4, 0,
]

lowerCAmelCase = [
    9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0,
    9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9,
    9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5,
    9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7,
    8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7,
    8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4,
    7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7,
    7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5,
    6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7,
    6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3,
    5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3,
    4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2,
    3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1,
    8_8, 4_4, 0,
]

lowerCAmelCase = [
    9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9,
    8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0,
]

lowerCAmelCase = [
    9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7,
    9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0,
    5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0,
]

lowerCAmelCase = [
    9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5,
    9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7,
    9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 8_3_8, 8_3_0, 8_2_3, 8_1_5,
    8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8,
    6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8,
    5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0,
    3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6,
    3_3, 0,
]
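# Illustrative sketch (added for clarity; not part of the original file): the lists above
# read like precomputed, strictly descending diffusion timestep schedules, each running
# from 999 down to 0. The helper below is an assumption about how a *uniform* schedule of
# the same shape could be generated; the irregular schedules above are clearly hand-picked.
def make_uniform_timesteps(num_train_timesteps=1000 , num_inference_steps=27 ):
    step = (num_train_timesteps - 1) / (num_inference_steps - 1)
    return [round((num_inference_steps - 1 - i) * step ) for i in range(num_inference_steps )]


# make_uniform_timesteps() -> [999, 961, 922, ..., 38, 0]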
304
1
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class A ( A_ , A_ ):
    UpperCamelCase_ : str ='''pixel_values'''
    UpperCamelCase_ : Optional[int] =False
    UpperCamelCase_ : List[Any] =TimmBackboneConfig

    def __init__(self , config , **kwargs ):
        requires_backends(self , 'timm' )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
        if config.backbone not in timm.list_models():
            raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
        if hasattr(config , 'out_features' ) and config.out_features is not None:
            raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
        pretrained = getattr(config , 'use_pretrained_backbone' , None )
        if pretrained is None:
            raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , 'out_indices' , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['module']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )

    @classmethod
    def _A (cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        requires_backends(cls , ['vision', 'timm'] )
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop('config' , TimmBackboneConfig() )
        use_timm = kwargs.pop('use_timm_backbone' , True )
        if not use_timm:
            raise ValueError('use_timm_backbone must be True for timm backbones' )
        num_channels = kwargs.pop('num_channels' , config.num_channels )
        features_only = kwargs.pop('features_only' , config.features_only )
        use_pretrained_backbone = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
        out_indices = kwargs.pop('out_indices' , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )

    def _A (self , module ):
        pass

    def _A (self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment' )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
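# Illustrative sketch (added for clarity; not part of the original file). It assumes this
# class is the one exported by transformers as TimmBackbone, that timm is installed, and
# that 'resnet18' is a valid timm model name. Pretrained weights are disabled so nothing
# is downloaded.
if __name__ == "__main__":
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

    demo_config = TimmBackboneConfig(backbone='resnet18' , use_pretrained_backbone=False , out_indices=(1, 2, 3, 4) )
    demo_backbone = TimmBackbone(config=demo_config )
    demo_outputs = demo_backbone(torch.randn(1 , 3 , 224 , 224 ) )
    print([feature_map.shape for feature_map in demo_outputs.feature_maps] )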
304
from __future__ import annotations

import numpy as np


def relu(lowercase__ ) -> np.ndarray:
    '''simple docstring'''
    return np.maximum(0 , lowercase__ )


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
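# Companion sketch (added for clarity; not part of the original file): the subgradient of
# ReLU that backpropagation uses is 1 for positive inputs and 0 elsewhere.
def relu_derivative(vector ) -> np.ndarray:
    return (np.asarray(vector ) > 0).astype(float )


if __name__ == "__main__":
    print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]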
304
1
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size , device ):
    '''simple docstring'''
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('RGB' )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image


def rename_key(key ):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*' , 'vision_model.encoder' , key )
    if "blocks" in key:
        key = re.sub(r'blocks' , 'layers' , key )
    if "attn" in key:
        key = re.sub(r'attn' , 'self_attn' , key )
    if "norm1" in key:
        key = re.sub(r'norm1' , 'layer_norm1' , key )
    if "norm2" in key:
        key = re.sub(r'norm2' , 'layer_norm2' , key )
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm' , 'post_layernorm' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , key )
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , key )
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj' , 'self_attn.projection' , key )
    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit='base' )
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 384
    image = load_demo_image(image_size=image_size , device='cpu' )
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
    input_ids = tokenizer(['a picture of'] ).input_ids
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image )
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit='base' )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question , return_tensors='pt' ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit='base' )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question , return_tensors='pt' , padding='max_length' , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
    assert out[0].item() == 0.2110_6874_9427_7954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')

    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
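# Illustrative check (added for clarity; not part of the original file): the sample key is
# made up, but it exercises the renaming chain in rename_key
# (visual_encoder -> vision_model.encoder, blocks -> layers, attn -> self_attn, proj -> projection).
assert (
    rename_key('visual_encoder.blocks.0.attn.proj.weight' )
    == 'vision_model.encoder.layers.0.self_attn.projection.weight'
)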
304
def solution(power = 1000 ) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num


if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
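# Worked example (added for clarity; not part of the original file): 2**15 == 32768 and
# 3 + 2 + 7 + 6 + 8 == 26, so:
assert solution(15 ) == 26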
304
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_speech_encoder_decoder'''] = ['''SpeechEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_speech_encoder_decoder'''] = ['''FlaxSpeechEncoderDecoderModel''']

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
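# Usage sketch (added for clarity; not part of the original file): with the lazy structure
# above, the heavy submodules are only imported when first accessed, e.g.
#     from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel
# resolves through _LazyModule and only then triggers the real import of
# modeling_speech_encoder_decoder.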
304
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
    '''simple docstring'''
    files = {}
    if train_file is not None:
        __lowercase= [train_file]
    if eval_file is not None:
        __lowercase= [eval_file]
    if test_file is not None:
        __lowercase= [test_file]
    ds = datasets.load_dataset('csv' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(lowercase__ )
    __lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(lowercase__ )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            __lowercase= ds[k].map(
                lambda lowercase__ : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
    elif len(features_name ) == 2:
        for k in files.keys():
            __lowercase= ds[k].map(
                lambda lowercase__ : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
    UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} )
    UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} )
    UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} )
    UpperCamelCase_ : int =field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    UpperCamelCase_ : bool =field(
        default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )


@dataclass
class ModelArguments:
    UpperCamelCase_ : str =field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    UpperCamelCase_ : Optional[str] =field(
        default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    UpperCamelCase_ : Optional[str] =field(
        default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    UpperCamelCase_ : Optional[str] =field(
        default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )


def main() -> Optional[Any]:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.info(
        F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        F'16-bits training: {training_args.fpaa}' )
    logger.info(F'Training/evaluation parameters {training_args}' )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , labelaid=label2id , idalabel={id: label for label, id in label2id.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )

    def compute_metrics(p ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(F' {key} = {value}' )
                writer.write(F'{key} = {value}\n' )
        results.update(result )
    return results


if __name__ == "__main__":
    main()
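# Illustrative sketch (added for clarity; not part of the original script): the label
# mapping built inside get_tfds. For a CSV label column holding the values ['neg', 'pos'],
#     labelaid = {label: i for i, label in enumerate(['neg', 'pos'])}  # {'neg': 0, 'pos': 1}
# and each example's string label is replaced by its integer id before batching. Note that
# the script derives the label list via list(set(...)), whose ordering is not guaranteed
# to be stable across runs.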
304
1
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


DATASETS_ON_HF_GCP = [
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
    {'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
    {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
    {'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
    {'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]


def list_datasets_on_hf_gcp_parameters(with_config=True ) -> List[Any]:
    '''simple docstring'''
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A_ ) )
class A ( A_ ):
    UpperCamelCase_ : Optional[int] =None
    UpperCamelCase_ : str =None

    def _A (self , lowerCAmelCase , lowerCAmelCase ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(lowerCAmelCase , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=lowerCAmelCase , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=lowerCAmelCase ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            datset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(datset_info_path ) )


@pytest.mark.integration
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
    '''simple docstring'''
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    __lowercase= None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def _lowerCamelCase( lowercase__ ) -> Tuple:
    '''simple docstring'''
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=lowercase__ )
    builder_cls = import_main_class(dataset_module.module_path , dataset=lowercase__ )
    builder_instance = builder_cls(
        cache_dir=lowercase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
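# Illustrative sketch (added for clarity; not part of the original file): the '/'.join in
# the parameterized test above composes the mirror URL of a builder's dataset_info.json,
# roughly
#     <HF_GCP_BASE_URL>/<relative_data_dir>/dataset_info.json
# where the middle component comes from builder_instance._relative_data_dir; the exact
# hashed path is builder-specific.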
304
import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class A ( A_ ):
    def _A (self ):
        __lowercase= self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
        self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )


class CvtModelTester:
    def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
        __lowercase= parent
        __lowercase= batch_size
        __lowercase= image_size
        __lowercase= patch_sizes
        __lowercase= patch_stride
        __lowercase= patch_padding
        __lowercase= is_training
        __lowercase= use_labels
        __lowercase= num_labels
        __lowercase= num_channels
        __lowercase= embed_dim
        __lowercase= num_heads
        __lowercase= stride_kv
        __lowercase= depth
        __lowercase= cls_token
        __lowercase= attention_drop_rate
        __lowercase= initializer_range
        __lowercase= layer_norm_eps

    def _A (self ):
        __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase= None
        if self.use_labels:
            __lowercase= ids_tensor([self.batch_size] , self.num_labels )
        __lowercase= self.get_config()
        return config, pixel_values, labels

    def _A (self ):
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= CvtModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        __lowercase= model(lowerCAmelCase )
        __lowercase= (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        __lowercase= self.num_labels
        __lowercase= CvtForImageClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _A (self ):
        __lowercase= self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        __lowercase= {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class A ( A_ , A_ , unittest.TestCase ):
    UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
    UpperCamelCase_ : List[str] =(
        {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    UpperCamelCase_ : str =False
    UpperCamelCase_ : List[Any] =False
    UpperCamelCase_ : Any =False
    UpperCamelCase_ : Union[str, Any] =False
    UpperCamelCase_ : Tuple =False

    def _A (self ):
        __lowercase= CvtModelTester(self )
        __lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )

    def _A (self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _A (self ):
        return

    @unittest.skip(reason='Cvt does not output attentions' )
    def _A (self ):
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds' )
    def _A (self ):
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings' )
    def _A (self ):
        pass

    def _A (self ):
        __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase= model_class(lowerCAmelCase )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            __lowercase= ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase )

    def _A (self ):
        def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
            __lowercase= model_class(lowerCAmelCase )
            model.to(lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
            hidden_states = outputs.hidden_states
            __lowercase= len(self.model_tester.depth )
            self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase= True
            check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase= True
            check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )

    def _A (self ):
        __lowercase= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def _A (self ):
        pass

    @slow
    def _A (self ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img() -> Optional[int]:
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_torch
@require_vision
class A ( unittest.TestCase ):
    @cached_property
    def _A (self ):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def _A (self ):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
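# Worked example (added for clarity; not part of the original file): the floor expressions
# in the model check above are the standard convolution output-size formula
#     out = floor((in + 2 * padding - kernel) / stride) + 1
# With the tester defaults (image_size=64, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2],
# patch_padding=[2, 1, 1]) the spatial size shrinks 64 -> 16 -> 8 -> 4:
#     stage 0: floor((64 + 4 - 7) / 4) + 1 = 15 + 1 = 16
#     stage 1: floor((16 + 2 - 3) / 2) + 1 = 7 + 1 = 8
#     stage 2: floor((8 + 2 - 3) / 2) + 1 = 3 + 1 = 4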
304
1
from math import factorial, radians


def sin(angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ) -> float:
    '''simple docstring'''
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
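# Cross-check (added for clarity; not part of the original file): the loop above evaluates
# the Maclaurin series sin(x) = x - x**3/3! + x**5/5! - x**7/7! + ...; the tolerance below
# is an assumption chosen to sit well inside the series' convergence at accuracy=18.
if __name__ == "__main__":
    from math import isclose, sin as reference_sin

    assert isclose(sin(30 ) , 0.5 , abs_tol=1E-9 )
    assert isclose(sin(137 ) , reference_sin(radians(137 ) ) , abs_tol=1E-9 )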
304
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_mra'''] = [
        '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MraForMaskedLM''',
        '''MraForMultipleChoice''',
        '''MraForQuestionAnswering''',
        '''MraForSequenceClassification''',
        '''MraForTokenClassification''',
        '''MraLayer''',
        '''MraModel''',
        '''MraPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
304
1
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self ):
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self ) -> bool:
        return self.head == self.tail

    def push(self , data: Any ) -> None:
        self.data.append(data )
        self.tail = self.tail + 1

    def pop(self ) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self ) -> int:
        return self.tail - self.head

    def print_queue(self ) -> None:
        print(self.data )
        print('**************' )
        print(self.data[self.head : self.tail] )


class MyNode:
    def __init__(self , data: Any ):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self ) -> Any:
        return self.data

    def get_left(self ) -> MyNode | None:
        return self.left

    def get_right(self ) -> MyNode | None:
        return self.right

    def get_height(self ) -> int:
        return self.height

    def set_data(self , data: Any ) -> None:
        self.data = data

    def set_left(self , node: MyNode | None ) -> None:
        self.left = node

    def set_right(self , node: MyNode | None ) -> None:
        self.right = node

    def set_height(self , height: int ) -> None:
        self.height = height


def get_height(node: MyNode | None ) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int , b: int ) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode ) -> MyNode:
    # Promote the left child (the print label here matches the rotation performed).
    print('right rotation node:' , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret


def left_rotation(node: MyNode ) -> MyNode:
    # Promote the right child.
    print('left rotation node:' , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret


def lr_rotation(node: MyNode ) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )


def rl_rotation(node: MyNode ) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )


def insert_node(node: MyNode | None , data: Any ) -> MyNode | None:
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    h = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h )
    return node


def get_right_most(root: MyNode ) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode ) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode , data: Any ) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data' )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    height = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(height )
    return root


class AVLtree:
    def __init__(self ):
        self.root: MyNode | None = None

    def get_height(self ) -> int:
        return get_height(self.root )

    def insert(self , data: Any ) -> None:
        print('insert:' + str(data ) )
        self.root = insert_node(self.root , data )

    def del_node(self , data: Any ) -> None:
        print('delete:' + str(data ) )
        if self.root is None:
            print('Tree is empty!' )
            return
        self.root = del_node(self.root , data )

    def __str__(self ):  # a level traversal gives a more intuitive look at the tree
        output = ''
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(100 ):
                if cnt == math.pow(2 , layer ) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
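# Worked example (added for clarity; not part of the original file): inserting 1, 2, 3 in
# order makes node 1 right-right heavy, so insert_node applies left_rotation and 2 becomes
# the new root with children 1 and 3.
if __name__ == "__main__":
    demo_tree = AVLtree()
    for value in (1, 2, 3):
        demo_tree.insert(value )
    assert demo_tree.root is not None and demo_tree.root.get_data() == 2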
304
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''

HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''

TO_HIGHLIGHT = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]


def _lowerCamelCase( lowercase__ ) -> Optional[int]:
    '''simple docstring'''
    return ConvertCommand(args.tfds_path , args.datasets_directory )


class A ( A_ ):
    @staticmethod
    def _A (lowerCAmelCase ):
        train_parser = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=lowerCAmelCase )

    def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
        self._logger = get_logger('datasets-cli/converting' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def _A (self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file' )
                continue
            with open(input_file , encoding='utf-8' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger' , 'get_logger' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight ) + '\n' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                    out_line = 'from . import ' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'Error converting {out_line.strip()}' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('.py' , '' )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(f'Adding directory {output_dir}' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , 'w' , encoding='utf-8' ) as f:
                f.writelines(out_lines )
            self._logger.info(f'Converted in {output_file}' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'Moving {dest_folder} to {utils_file}' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
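# Illustrative check (added for clarity; not part of the original file): pushing one
# hypothetical tfds line through the TO_CONVERT patterns shows the intended rewrite.
_sample_line = 'tfds.features.Text()'
for _pattern, _replacement in TO_CONVERT:
    _sample_line = re.sub(_pattern , _replacement , _sample_line )
assert _sample_line == "datasets.Value('string')"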
304
1
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self , k: float , window_size: int ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value' )

    def __str__(self ):
        return str(self.k )

    def detect(self , img_path: str ):
        img = cv2.imread(img_path , 0 )
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # response constant validated in the constructor
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04 , 3 )
    color_img, _ = edge_detect.detect('''path_to_image''' )
    cv2.imwrite('''detect.png''' , color_img )
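# Note (added for clarity; not part of the original file): the response computed in detect
# is the Harris corner measure
#     R = det(M) - k * trace(M)**2,  det(M) = wxx * wyy - wxy**2,  trace(M) = wxx + wyy
# A tiny numeric check with made-up window sums: wxx, wyy, wxy = 4.0, 1.0, 0.5 gives
# det = 3.75 and trace = 5.0, so with k = 0.04 the response is R = 3.75 - 0.04 * 25.0 = 2.75.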
304
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


lowerCAmelCase = {
    '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
    '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
    '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
    '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
    '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
    '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
    '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
    '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}


class A ( A_ ):
    UpperCamelCase_ : Optional[int] ='''albert'''

    def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
        super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
        __lowercase= vocab_size
        __lowercase= embedding_size
        __lowercase= hidden_size
        __lowercase= num_hidden_layers
        __lowercase= num_hidden_groups
        __lowercase= num_attention_heads
        __lowercase= inner_group_num
        __lowercase= hidden_act
        __lowercase= intermediate_size
        __lowercase= hidden_dropout_prob
        __lowercase= attention_probs_dropout_prob
        __lowercase= max_position_embeddings
        __lowercase= type_vocab_size
        __lowercase= initializer_range
        __lowercase= layer_norm_eps
        __lowercase= classifier_dropout_prob
        __lowercase= position_embedding_type


class A ( A_ ):
    @property
    def _A (self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
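# Usage sketch (added for clarity; not part of the original file): upstream this config
# class is exported by transformers as AlbertConfig; assuming that name, a minimal
# instantiation is
#     from transformers import AlbertConfig
#     config = AlbertConfig(hidden_size=768 , num_attention_heads=12 )
#     print(config.embedding_size )  # 128 by default, per the signature above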
304
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


lowerCAmelCase = logging.get_logger(__name__)

lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

lowerCAmelCase = {
    '''vocab_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

lowerCAmelCase = {
    '''yjernite/retribert-base-uncased''': 5_1_2,
}

lowerCAmelCase = {
    '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}


class A ( A_ ):
    UpperCamelCase_ : Optional[Any] =VOCAB_FILES_NAMES
    UpperCamelCase_ : List[Any] =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : Optional[int] =PRETRAINED_INIT_CONFIGURATION
    UpperCamelCase_ : Any =RetriBertTokenizer
    UpperCamelCase_ : Any =['''input_ids''', '''attention_mask''']

    def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
        super().__init__(
            lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , lowerCAmelCase ) != do_lower_case
            or normalizer_state.get('strip_accents' , lowerCAmelCase ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(lowerCAmelCase , normalizer_state.pop('type' ) )
            __lowercase= do_lower_case
            __lowercase= strip_accents
            __lowercase= tokenize_chinese_chars
            __lowercase= normalizer_class(**lowerCAmelCase )
        __lowercase= do_lower_case

    def _A (self , lowerCAmelCase , lowerCAmelCase=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
        files = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
        return tuple(files )
304
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' __lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' ) __lowercase= transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) __lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ ) return image def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' if "visual_encoder" in key: __lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ ) if "blocks" in key: __lowercase= re.sub(R'blocks' , 'layers' , lowercase__ ) if "attn" in key: __lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ ) if "norm1" in key: __lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ ) if "norm2" in key: __lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ ) if "encoder.norm" in key: __lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ ) if "encoder.patch_embed.proj" in key: __lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ ) if "encoder.pos_embed" in key: __lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ ) if "encoder.cls_token" in key: __lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ ) if "self_attn" in key: __lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ ) return key @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int: '''simple docstring''' if config_path is not None: __lowercase= BlipConfig.from_pretrained(lowercase__ ) else: __lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} ) __lowercase= BlipForConditionalGeneration(lowercase__ ).eval() __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' __lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' ) __lowercase= pt_model.eval() __lowercase= pt_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value hf_model.load_state_dict(lowercase__ ) __lowercase= 3_8_4 __lowercase= load_demo_image(image_size=lowercase__ , device='cpu' ) __lowercase= BertTokenizer.from_pretrained('bert-base-uncased' ) __lowercase= tokenizer(['a picture of'] ).input_ids __lowercase= hf_model.generate(lowercase__ , lowercase__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] __lowercase= hf_model.generate(lowercase__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] if 
pytorch_dump_folder_path is not None: hf_model.save_pretrained(lowercase__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' __lowercase= ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) __lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' ) vqa_model.eval() __lowercase= vqa_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value __lowercase= BlipForQuestionAnswering(lowercase__ ) hf_vqa_model.load_state_dict(lowercase__ ) __lowercase= ['How many dogs are in this image?'] __lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids __lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' __lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' ) itm_model.eval() __lowercase= itm_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value __lowercase= BlipForImageTextRetrieval(lowercase__ ) __lowercase= ['A picture of a woman with a dog sitting in a beach'] __lowercase= tokenizer( lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids hf_itm_model.load_state_dict(lowercase__ ) hf_itm_model.eval() __lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ ) __lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowerCAmelCase = parser.parse_args() convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
304
1
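A hedged walkthrough of the rename_key chain in the record above, using an illustrative checkpoint key (not taken from a real BLIP checkpoint); each re.sub fires only when its substring test matches:

import re

key = "visual_encoder.blocks.0.attn.proj.weight"
key = re.sub(r"visual_encoder*", "vision_model.encoder", key)   # encoder prefix
key = re.sub(r"blocks", "layers", key)                          # block -> layer
key = re.sub(r"attn", "self_attn", key)                         # attention module
key = re.sub(r"self_attn.proj", "self_attn.projection", key)    # output projection
assert key == "vision_model.encoder.layers.0.self_attn.projection.weight"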
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''tokenizer_file''': { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''', }, } lowerCAmelCase = { '''gpt-neox-20b''': 2_0_4_8, } class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase=False , **lowerCAmelCase , ): super().__init__( lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , lowerCAmelCase ) != add_prefix_space: __lowercase= getattr(lowerCAmelCase , pre_tok_state.pop('type' ) ) __lowercase= add_prefix_space __lowercase= pre_tok_class(**lowerCAmelCase ) __lowercase= add_prefix_space def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase ) return tuple(lowerCAmelCase ) def _A (self , lowerCAmelCase ): __lowercase= [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] ) if len(lowerCAmelCase ) > self.model_max_length: __lowercase= input_ids[-self.model_max_length :] return input_ids
304
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass test_data_odd = (3, 9, -1_1, 0, 7, 5, 1, -1) test_data_even = (4, 6, 2, 0, 8, 1_0, 3, -2) @dataclass class Node : data : int next_node : Node | None class SortedLinkedList : def __init__(self , ints: Iterable[int] ): self.head = None for i in sorted(ints , reverse=True ): self.head = Node(i , self.head ) def __iter__(self ) -> Iterator[int]: node = self.head while node: yield node.data node = node.next_node def __len__(self ): return sum(1 for _ in self ) def __str__(self ): return " -> ".join([str(node ) for node in self] ) def merge_lists( sll_one , sll_two ) -> SortedLinkedList: '''simple docstring''' return SortedLinkedList(list(sll_one ) + list(sll_two ) ) if __name__ == "__main__": import doctest doctest.testmod() SSL = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
304
1
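A quick sanity check (a sketch) of the merge semantics above: merging two of these sorted linked lists is equivalent to sorting the concatenation of their inputs.

odd = (3, 9, -11, 0, 7, 5, 1, -1)
even = (4, 6, 2, 0, 8, 10, 3, -2)
# merge_lists(SSL(odd), SSL(even)) yields the values of both lists in ascending order
assert sorted(odd + even) == [-11, -2, -1, 0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10]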
def nor_gate( input_a , input_b ) -> int: '''simple docstring''' return int(input_a == input_b == 0 ) def main( ) -> None: '''simple docstring''' print('Truth Table of NOR Gate:' ) print('| Input 1 | Input 2 | Output |' ) print(F'| 0 | 0 | {nor_gate(0 , 0 )} |' ) print(F'| 0 | 1 | {nor_gate(0 , 1 )} |' ) print(F'| 1 | 0 | {nor_gate(1 , 0 )} |' ) print(F'| 1 | 1 | {nor_gate(1 , 1 )} |' ) if __name__ == "__main__": import doctest doctest.testmod() main()
304
from __future__ import annotations from collections.abc import Callable def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float: '''simple docstring''' __lowercase= x_start __lowercase= fnc(lowercase__ ) __lowercase= 0.0 for _ in range(lowercase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area __lowercase= (x_end - x_start) / steps + xa __lowercase= fnc(lowercase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step __lowercase= xa __lowercase= fxa return area if __name__ == "__main__": def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' return x**3 + x**2 print('''f(x) = x^3 + x^2''') print('''The area between the curve, x = -5, x = 5 and the x axis is:''') lowerCAmelCase = 1_0 while i <= 1_0_0_0_0_0: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 1_0
304
1
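A worked numeric check for the trapezoid demo above (a sketch; F here is just the antiderivative, not a name from the record): the per-segment abs() accumulates unsigned area, so the printed estimates converge to the integral of |x**3 + x**2| over [-5, 5].

def F(x):
    return x ** 4 / 4 + x ** 3 / 3  # antiderivative of x**3 + x**2

# |integral over [-5, -1]| (where the integrand is negative) plus integral over [-1, 5]
expected = (F(-5) - F(-1)) + (F(5) - F(-1))
assert abs(expected - 938 / 3) < 1e-6  # = 312.666..., the value the loop approaches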
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__="attention" ) -> Dict: '''simple docstring''' __lowercase= params[F'{prefix}/layers_{i}/{layer_name}/key/kernel'] __lowercase= params[F'{prefix}/layers_{i}/{layer_name}/out/kernel'] __lowercase= params[F'{prefix}/layers_{i}/{layer_name}/query/kernel'] __lowercase= params[F'{prefix}/layers_{i}/{layer_name}/value/kernel'] return k, o, q, v def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__=False ) -> Any: '''simple docstring''' if split_mlp_wi: __lowercase= params[F'{prefix}/layers_{i}/mlp/wi_0/kernel'] __lowercase= params[F'{prefix}/layers_{i}/mlp/wi_1/kernel'] __lowercase= (wi_a, wi_a) else: __lowercase= params[F'{prefix}/layers_{i}/mlp/wi/kernel'] __lowercase= params[F'{prefix}/layers_{i}/mlp/wo/kernel'] return wi, wo def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' return params[F'{prefix}/layers_{i}/{layer_name}/scale'] def _lowerCamelCase( lowercase__ , *, lowercase__ , lowercase__ ) -> int: '''simple docstring''' __lowercase= traverse_util.flatten_dict(variables['target'] ) __lowercase= {'/'.join(lowercase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __lowercase= 'encoder/layers_0/mlp/wi_0/kernel' in old print('Split MLP:' , lowercase__ ) __lowercase= collections.OrderedDict() # Shared embeddings. __lowercase= old['token_embedder/embedding'] # Encoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). __lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'encoder' , 'pre_attention_layer_norm' ) __lowercase, __lowercase, __lowercase, __lowercase= tax_attention_lookup(lowercase__ , lowercase__ , 'encoder' , 'attention' ) __lowercase= layer_norm __lowercase= k.T __lowercase= o.T __lowercase= q.T __lowercase= v.T # Block i, layer 1 (MLP). __lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'encoder' , 'pre_mlp_layer_norm' ) __lowercase, __lowercase= tax_mlp_lookup(lowercase__ , lowercase__ , 'encoder' , lowercase__ ) __lowercase= layer_norm if split_mlp_wi: __lowercase= wi[0].T __lowercase= wi[1].T else: __lowercase= wi.T __lowercase= wo.T __lowercase= old[ 'encoder/relpos_bias/rel_embedding' ].T __lowercase= old['encoder/encoder_norm/scale'] if not is_encoder_only: # Decoder. for i in range(lowercase__ ): # Block i, layer 0 (Self Attention). __lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'decoder' , 'pre_self_attention_layer_norm' ) __lowercase, __lowercase, __lowercase, __lowercase= tax_attention_lookup(lowercase__ , lowercase__ , 'decoder' , 'self_attention' ) __lowercase= layer_norm __lowercase= k.T __lowercase= o.T __lowercase= q.T __lowercase= v.T # Block i, layer 1 (Cross Attention). __lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'decoder' , 'pre_cross_attention_layer_norm' ) __lowercase, __lowercase, __lowercase, __lowercase= tax_attention_lookup(lowercase__ , lowercase__ , 'decoder' , 'encoder_decoder_attention' ) __lowercase= layer_norm __lowercase= k.T __lowercase= o.T __lowercase= q.T __lowercase= v.T # Block i, layer 2 (MLP). 
__lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'decoder' , 'pre_mlp_layer_norm' ) __lowercase, __lowercase= tax_mlp_lookup(lowercase__ , lowercase__ , 'decoder' , lowercase__ ) __lowercase= layer_norm if split_mlp_wi: __lowercase= wi[0].T __lowercase= wi[1].T else: __lowercase= wi.T __lowercase= wo.T __lowercase= old['decoder/decoder_norm/scale'] __lowercase= old[ 'decoder/relpos_bias/rel_embedding' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __lowercase= old['decoder/logits_dense/kernel'].T return new def _lowerCamelCase( lowercase__ , lowercase__ ) -> str: '''simple docstring''' __lowercase= collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __lowercase= state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __lowercase= state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' ) __lowercase= state_dict['shared.weight'] return state_dict def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= checkpoints.load_tax_checkpoint(lowercase__ ) __lowercase= convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ ) __lowercase= make_state_dict(lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ , strict=lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ) -> int: '''simple docstring''' __lowercase= TaConfig.from_json_file(lowercase__ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __lowercase= TaEncoderModel(lowercase__ ) else: __lowercase= TaForConditionalGeneration(lowercase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(lowercase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowercase__ ) print('Done' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) lowerCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
304
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_lengths __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= gelu_activation __lowercase= sinusoidal_embeddings __lowercase= causal __lowercase= asm __lowercase= n_langs __lowercase= vocab_size __lowercase= n_special __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= summary_type __lowercase= use_proj __lowercase= scope __lowercase= bos_token_id def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None if self.use_input_lengths: __lowercase= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , 2 ).float() __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A (self ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) __lowercase= outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , ) ((__lowercase), )= result_with_labels.to_tuple() __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) ((__lowercase), )= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase 
) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_labels __lowercase= XLMForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_choices __lowercase= XLMForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : int =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase_ : Dict =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCamelCase_ : str =( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= XLMModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= min_length + idx + 1 __lowercase= ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , ) pass @slow def _A (self ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= 
XLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president __lowercase= [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
304
1
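A minimal re-implementation, for illustration only, of the flatten-then-join step the conversion record above performs with flax's traverse_util.flatten_dict: nested t5x parameter dicts become flat "a/b/c" string keys before renaming.

def flatten(params, prefix=()):
    # recursively walk the nested dict, joining the key path with "/"
    flat = {}
    for key, value in params.items():
        if isinstance(value, dict):
            flat.update(flatten(value, prefix + (key,)))
        else:
            flat["/".join(prefix + (key,))] = value
    return flat

nested = {"encoder": {"layers_0": {"attention": {"key": {"kernel": 0}}}}}
assert flatten(nested) == {"encoder/layers_0/attention/key/kernel": 0}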
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class A ( A_ ): # to overwrite at feature extractactor specific tests UpperCamelCase_ : List[Any] =None UpperCamelCase_ : Optional[Any] =None @property def _A (self ): return self.feat_extract_tester.prepare_feat_extract_dict() def _A (self ): __lowercase= self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCAmelCase , 'feature_size' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'padding_value' ) ) def _A (self ): __lowercase= self.feat_extract_tester.prepare_inputs_for_common() __lowercase= self.feature_extraction_class(**self.feat_extract_dict ) __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(lowerCAmelCase ) == len(lowerCAmelCase ) for x, y in zip(lowerCAmelCase , processed_features[input_name] ) ) ) __lowercase= self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase ) __lowercase= BatchFeature({input_name: speech_inputs} , tensor_type='np' ) __lowercase= processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowercase= batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _A (self ): __lowercase= self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase ) __lowercase= self.feature_extraction_class(**self.feat_extract_dict ) __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) __lowercase= processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowercase= batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _A (self ): __lowercase= self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase ) __lowercase= self.feature_extraction_class(**self.feat_extract_dict ) __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} , tensor_type='tf' ) __lowercase= processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowercase= batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _A (self , lowerCAmelCase=False ): def _inputs_have_equal_length(lowerCAmelCase ): __lowercase= len(input[0] ) for input_slice in input[1:]: if len(lowerCAmelCase ) != length: return False return True def _inputs_are_equal(lowerCAmelCase , lowerCAmelCase ): if len(lowerCAmelCase ) != len(lowerCAmelCase ): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase ): if not np.allclose(np.asarray(lowerCAmelCase ) , np.asarray(lowerCAmelCase ) , atol=1E-3 ): return False return True __lowercase= self.feature_extraction_class(**self.feat_extract_dict ) __lowercase= self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase ) __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} ) __lowercase= 
self.feat_extract_tester.seq_length_diff __lowercase= self.feat_extract_tester.max_seq_length + pad_diff __lowercase= self.feat_extract_tester.min_seq_length __lowercase= self.feat_extract_tester.batch_size __lowercase= self.feat_extract_tester.feature_size # test padding for List[int] + numpy __lowercase= feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad(lowerCAmelCase , padding='longest' ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np' ) __lowercase= input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(lowerCAmelCase ): feat_extract.pad(lowerCAmelCase , padding='max_length' )[input_name] __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np' ) __lowercase= input_a[input_name] self.assertFalse(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy __lowercase= feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=1_0 ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=1_0 ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=lowerCAmelCase ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=lowerCAmelCase , return_tensors='np' , ) __lowercase= input_a[input_name] self.assertTrue(all(len(lowerCAmelCase ) % 1_0 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase ) ) __lowercase= pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0 self.assertTrue(all(len(lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct __lowercase= (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( 
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def _A (self , lowerCAmelCase=False ): def _inputs_have_equal_length(lowerCAmelCase ): __lowercase= len(input[0] ) for input_slice in input[1:]: if len(lowerCAmelCase ) != length: return False return True def _inputs_are_equal(lowerCAmelCase , lowerCAmelCase ): if len(lowerCAmelCase ) != len(lowerCAmelCase ): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase ): if not np.allclose(np.asarray(lowerCAmelCase ) , np.asarray(lowerCAmelCase ) , atol=1E-3 ): return False return True __lowercase= self.feature_extraction_class(**self.feat_extract_dict ) __lowercase= self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase ) __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} ) # truncate to smallest __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=lowerCAmelCase ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) ) __lowercase= input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase ) ) # truncate to smallest with np __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=lowerCAmelCase , ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' ) __lowercase= input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase ) ) # truncate to middle __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=lowerCAmelCase , return_tensors='np' , ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=lowerCAmelCase ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' ) __lowercase= input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase ): feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase ): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase )[input_name] # padding has to be max_length when setting `truncation=True` with 
self.assertRaises(lowerCAmelCase ): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(lowerCAmelCase ): feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy __lowercase= 1_2 __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , ) __lowercase= input_a[input_name] __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCAmelCase , ) __lowercase= input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of __lowercase= len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: __lowercase= ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase ) ) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase ) ) def _A (self ): self._check_padding(numpify=lowerCAmelCase ) def _A (self ): self._check_padding(numpify=lowerCAmelCase ) def _A (self ): self._check_truncation(numpify=lowerCAmelCase ) def _A (self ): self._check_truncation(numpify=lowerCAmelCase ) @require_torch def _A (self ): __lowercase= self.feature_extraction_class(**self.feat_extract_dict ) __lowercase= self.feat_extract_tester.prepare_inputs_for_common() __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} ) __lowercase= feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name] __lowercase= feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def _A (self ): __lowercase= self.feature_extraction_class(**self.feat_extract_dict ) __lowercase= self.feat_extract_tester.prepare_inputs_for_common() __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} ) __lowercase= feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name] __lowercase= feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _A (self ): __lowercase= self.feat_extract_dict __lowercase= True __lowercase= self.feature_extraction_class(**lowerCAmelCase ) __lowercase= self.feat_extract_tester.prepare_inputs_for_common() __lowercase= [len(lowerCAmelCase ) for x in speech_inputs] __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} ) __lowercase= feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , lowerCAmelCase ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase ) def _A (self ): __lowercase= self.feat_extract_dict __lowercase= True __lowercase= self.feature_extraction_class(**lowerCAmelCase ) __lowercase= self.feat_extract_tester.prepare_inputs_for_common() __lowercase= 
[len(lowerCAmelCase ) for x in speech_inputs] __lowercase= feat_extract.model_input_names[0] __lowercase= BatchFeature({input_name: speech_inputs} ) __lowercase= min(lowerCAmelCase ) __lowercase= feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np' ) self.assertIn('attention_mask' , lowerCAmelCase ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
304
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase = {'''UserAgent''': UserAgent().random} def _lowerCamelCase( lowercase__ ) -> dict: '''simple docstring''' __lowercase= script.contents[0] __lowercase= json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A : def __init__(self , lowerCAmelCase ): __lowercase= f'https://www.instagram.com/{username}/' __lowercase= self.get_json() def _A (self ): __lowercase= requests.get(self.url , headers=lowerCAmelCase ).text __lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__(self ): return f'{self.__class__.__name__}(\'{self.username}\')' def __str__(self ): return f'{self.fullname} ({self.username}) is {self.biography}' @property def _A (self ): return self.user_data["username"] @property def _A (self ): return self.user_data["full_name"] @property def _A (self ): return self.user_data["biography"] @property def _A (self ): return self.user_data["business_email"] @property def _A (self ): return self.user_data["external_url"] @property def _A (self ): return self.user_data["edge_followed_by"]["count"] @property def _A (self ): return self.user_data["edge_follow"]["count"] @property def _A (self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _A (self ): return self.user_data["profile_pic_url_hd"] @property def _A (self ): return self.user_data["is_verified"] @property def _A (self ): return self.user_data["is_private"] def _lowerCamelCase( lowercase__ = "github" ) -> None: '''simple docstring''' import os if os.environ.get('CI' ): return # test failing on GitHub Actions __lowercase= InstagramUser(lowercase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowercase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase = InstagramUser('''github''') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
304
1
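A small sketch of the pad_to_multiple_of arithmetic the padding tests above assert (the helper name is illustrative): lengths already on the multiple are kept, everything else is rounded up.

def round_up_to_multiple(length, multiple):
    return length if length % multiple == 0 else (length // multiple + 1) * multiple

assert round_up_to_multiple(20, 10) == 20
assert round_up_to_multiple(23, 10) == 30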
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json lowerCAmelCase = '''sshleifer/mar_enro_6_3_student''' class A ( A_ ): def _A (self ): super().setUp() __lowercase= cached_path( 'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=lowerCAmelCase , ) __lowercase= f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k' @slow @require_torch_gpu def _A (self ): MarianMTModel.from_pretrained(lowerCAmelCase ) @slow @require_torch_gpu def _A (self ): __lowercase= { '$MAX_LEN': 6_4, '$BS': 6_4, '$GAS': 1, '$ENRO_DIR': self.data_dir, 'facebook/mbart-large-cc25': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '--learning_rate=3e-5': '--learning_rate 3e-4', '--num_train_epochs 6': '--num_train_epochs 1', } # Clean up bash script __lowercase= (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip() __lowercase= bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' ) for k, v in env_vars_to_replace.items(): __lowercase= bash_script.replace(lowerCAmelCase , str(lowerCAmelCase ) ) __lowercase= self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") __lowercase= f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split() # XXX: args.gpus > 1 : handle multi_gpu in the future __lowercase= ['finetune.py'] + bash_script.split() + args with patch.object(lowerCAmelCase , 'argv' , lowerCAmelCase ): __lowercase= argparse.ArgumentParser() __lowercase= pl.Trainer.add_argparse_args(lowerCAmelCase ) __lowercase= SummarizationModule.add_model_specific_args(lowerCAmelCase , os.getcwd() ) __lowercase= parser.parse_args() __lowercase= main(lowerCAmelCase ) # Check metrics __lowercase= load_json(model.metrics_save_path ) __lowercase= metrics['val'][0] __lowercase= metrics['val'][-1] self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] , lowerCAmelCase ) self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['val_avg_bleu'] , 1_7 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict __lowercase= os.listdir(lowerCAmelCase ) __lowercase= [x for x in contents if x.endswith('.ckpt' )][0] __lowercase= os.path.join(args.output_dir , lowerCAmelCase ) __lowercase= torch.load(lowerCAmelCase , map_location='cpu' ) __lowercase= 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: __lowercase= {os.path.basename(lowerCAmelCase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['test'] ) == 1 class A ( A_ ): @timeout_decorator.timeout(6_0_0 ) @slow @require_torch_gpu def _A (self ): __lowercase= f'{self.test_file_dir_str}/test_data/wmt_en_ro' __lowercase= { '--fp16_opt_level=O1': '', '$MAX_LEN': 1_2_8, '$BS': 1_6, '$GAS': 1, '$ENRO_DIR': data_dir, '$m': 'sshleifer/student_marian_en_ro_6_1', 'val_check_interval=0.25': 'val_check_interval=1.0', } # Clean up bash script __lowercase= ( (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip() ) __lowercase= bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' ) __lowercase= bash_script.replace('--fp16 ' , ' ' ) for k, v in env_vars_to_replace.items(): __lowercase= bash_script.replace(lowerCAmelCase , str(lowerCAmelCase ) ) __lowercase= self.get_auto_remove_tmp_dir() __lowercase= bash_script.replace('--fp16' , '' ) __lowercase= 6 __lowercase= ( ['distillation.py'] + bash_script.split() + [ f'--output_dir={output_dir}', '--gpus=1', '--learning_rate=1e-3', f'--num_train_epochs={epochs}', '--warmup_steps=10', '--val_check_interval=1.0', '--do_predict', ] ) with patch.object(lowerCAmelCase , 'argv' , lowerCAmelCase ): __lowercase= argparse.ArgumentParser() __lowercase= pl.Trainer.add_argparse_args(lowerCAmelCase ) __lowercase= SummarizationDistiller.add_model_specific_args(lowerCAmelCase , os.getcwd() ) __lowercase= parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu __lowercase= distill_main(lowerCAmelCase ) # Check metrics __lowercase= load_json(model.metrics_save_path ) __lowercase= metrics['val'][0] __lowercase= metrics['val'][-1] assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] , lowerCAmelCase ) # check lightning ckpt can be loaded and has a reasonable statedict __lowercase= os.listdir(lowerCAmelCase ) __lowercase= [x for x in contents if x.endswith('.ckpt' )][0] __lowercase= os.path.join(args.output_dir , lowerCAmelCase ) __lowercase= torch.load(lowerCAmelCase , map_location='cpu' ) __lowercase= 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. 
if args.do_predict: __lowercase= {os.path.basename(p ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['test'] ) == 1
304
from typing import Any

import numpy as np


def _lowerCamelCase( lowercase__ ) -> bool:
    '''simple docstring'''
    return np.array_equal(lowercase__ , matrix.conjugate().T )


def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
    '''simple docstring'''
    __lowercase= v.conjugate().T
    __lowercase= v_star.dot(lowercase__ )
    assert isinstance(lowercase__ , np.ndarray )
    return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))


def _lowerCamelCase( ) -> None:
    '''simple docstring'''
    __lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    __lowercase= np.array([[1], [2], [3]] )
    assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
    print(rayleigh_quotient(lowercase__ , lowercase__ ) )

    __lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
    assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
304
1
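For reference, the linear-algebra sample in the record above is not runnable as stored: every function definition is renamed to _lowerCamelCase while the call sites keep the original names. A minimal de-obfuscated sketch, with names such as is_hermitian and rayleigh_quotient reconstructed from those call sites (so treat them as illustrative), behaves like this:

import numpy as np

def is_hermitian(matrix: np.ndarray) -> bool:
    # A Hermitian matrix equals its own conjugate transpose.
    return np.array_equal(matrix, matrix.conjugate().T)

def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> np.ndarray:
    # R(A, v) = (v* A v) / (v* v); real-valued when A is Hermitian.
    v_star = v.conjugate().T
    return (v_star @ a @ v) / (v_star @ v)

a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
v = np.array([[1], [2], [3]])
assert is_hermitian(a)
print(rayleigh_quotient(a, v))  # -> [[3.]]

For this real symmetric matrix the quotient evaluates to exactly 3, which is what the stored assertion `== float(3)` checks.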
import argparse from collections import defaultdict import yaml lowerCAmelCase = '''docs/source/en/_toctree.yml''' def _lowerCamelCase( lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= defaultdict(lowercase__ ) __lowercase= [] __lowercase= [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'local': doc['local'], 'title': doc['title']} ) else: new_doc_list.append(lowercase__ ) __lowercase= new_doc_list __lowercase= [key for key, value in counts.items() if value > 1] __lowercase= [] for duplicate_key in duplicates: __lowercase= list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} ) if len(lowercase__ ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] ) __lowercase= sorted(lowercase__ , key=lambda lowercase__ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(lowercase__ ) > 1: raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' ) overview_doc.extend(lowercase__ ) # Sort return overview_doc def _lowerCamelCase( lowercase__=False ) -> List[str]: '''simple docstring''' with open(lowercase__ , encoding='utf-8' ) as f: __lowercase= yaml.safe_load(f.read() ) # Get to the API doc __lowercase= 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowercase= content[api_idx]['sections'] # Then to the model doc __lowercase= 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 __lowercase= api_doc[scheduler_idx]['sections'] __lowercase= clean_doc_toc(lowercase__ ) __lowercase= False if new_scheduler_doc != scheduler_doc: __lowercase= True if overwrite: __lowercase= new_scheduler_doc if diff: if overwrite: __lowercase= api_doc with open(lowercase__ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' 
) def _lowerCamelCase( lowercase__=False ) -> Union[str, Any]: '''simple docstring''' with open(lowercase__ , encoding='utf-8' ) as f: __lowercase= yaml.safe_load(f.read() ) # Get to the API doc __lowercase= 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowercase= content[api_idx]['sections'] # Then to the model doc __lowercase= 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 __lowercase= False __lowercase= api_doc[pipeline_idx]['sections'] __lowercase= [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: __lowercase= pipeline_doc['section'] __lowercase= clean_doc_toc(lowercase__ ) if overwrite: __lowercase= new_sub_pipeline_doc new_pipeline_docs.append(lowercase__ ) # sort overall pipeline doc __lowercase= clean_doc_toc(lowercase__ ) if new_pipeline_docs != pipeline_docs: __lowercase= True if overwrite: __lowercase= new_pipeline_docs if diff: if overwrite: __lowercase= api_doc with open(lowercase__ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(lowercase__ , allow_unicode=lowercase__ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowerCAmelCase = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
304
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask'''] def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ): super().__init__( feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= spectrogram_length __lowercase= num_channels __lowercase= patch_size __lowercase= feature_size // self.patch_size[1] __lowercase= n_fft __lowercase= sampling_rate // hop_length_to_sampling_rate __lowercase= sampling_rate __lowercase= padding_value __lowercase= mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T def _A (self , lowerCAmelCase ): __lowercase= spectrogram( lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , ) __lowercase= log_spec[:, :-1] __lowercase= log_spec - 20.0 __lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __lowercase= is_batched_numpy or ( isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ): __lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa ) elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowercase= raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowercase= [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __lowercase= [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , lowerCAmelCase ): __lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __lowercase= max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __lowercase= [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __lowercase= np.array(lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding __lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __lowercase= padded_audio_features * self.padding_value for i in range(len(lowerCAmelCase ) ): __lowercase= audio_features[i] __lowercase= feature # return as BatchFeature if return_attention_mask: __lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: __lowercase= {'audio_values': padded_audio_features} __lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase ) return encoded_inputs
304
1
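The table-of-contents cleaner in the record above counts how often each `local` target occurs, collapses duplicates down to a single agreed title, and sorts what remains. A minimal runnable sketch of that core logic (the function name dedup_by_local is illustrative, not the repository's API):

from collections import defaultdict

def dedup_by_local(doc_list: list) -> list:
    # Count how often each `local` target appears in the table of contents.
    counts = defaultdict(int)
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
    new_doc = []
    for key in [k for k, v in counts.items() if v > 1]:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == key})
        if len(titles) > 1:
            raise ValueError(f"{key} appears with conflicting titles: {titles}")
        new_doc.append({"local": key, "title": titles[0]})  # keep exactly one copy
    # Keep every entry that was not duplicated.
    new_doc.extend(d for d in doc_list if "local" not in d or counts[d["local"]] == 1)
    return sorted(new_doc, key=lambda d: d["title"].lower())

print(dedup_by_local([{"local": "a", "title": "A"}, {"local": "a", "title": "A"}]))
# -> [{'local': 'a', 'title': 'A'}]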
from __future__ import annotations


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> list:
    '''simple docstring'''
    __lowercase= []
    __lowercase, __lowercase= input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    __lowercase= result + left + right
    return input_list


def _lowerCamelCase( lowercase__ ) -> list:
    '''simple docstring'''
    if len(lowercase__ ) <= 1:
        return input_list
    __lowercase= list(lowercase__ )

    # iteration for two-way merging
    __lowercase= 2
    while p <= len(lowercase__ ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(lowercase__ ) , lowercase__ ):
            __lowercase= i
            __lowercase= i + p - 1
            __lowercase= (low + high + 1) // 2
            __lowercase= merge(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        # final merge of last two parts
        if p * 2 >= len(lowercase__ ):
            __lowercase= i
            __lowercase= merge(lowercase__ , 0 , lowercase__ , len(lowercase__ ) - 1 )
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        lowerCAmelCase = []
    else:
        lowerCAmelCase = [int(item.strip()) for item in user_input.split(''',''')]
    print(iter_merge_sort(unsorted))
304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' __lowercase= create_tensor(lowercase__ ) __lowercase= gather(lowercase__ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' __lowercase= [state.process_index] __lowercase= gather_object(lowercase__ ) assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}' assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}' def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' __lowercase= create_tensor(lowercase__ ) __lowercase= broadcast(lowercase__ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def _lowerCamelCase( lowercase__ ) -> List[Any]: '''simple docstring''' if state.is_main_process: __lowercase= torch.arange(state.num_processes + 1 ).to(state.device ) else: __lowercase= torch.arange(state.num_processes ).to(state.device ) __lowercase= pad_across_processes(lowercase__ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def _lowerCamelCase( lowercase__ ) -> Any: '''simple docstring''' if state.num_processes != 2: return __lowercase= create_tensor(lowercase__ ) __lowercase= reduce(lowercase__ , 'sum' ) __lowercase= torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}' def _lowerCamelCase( lowercase__ ) -> Union[str, Any]: '''simple docstring''' if state.num_processes != 2: return __lowercase= create_tensor(lowercase__ ) __lowercase= reduce(lowercase__ , 'mean' ) __lowercase= torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}' def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' main() def _lowerCamelCase( ) -> List[str]: '''simple docstring''' __lowercase= PartialState() state.print(F'State: {state}' ) state.print('testing gather' ) test_gather(lowercase__ ) state.print('testing gather_object' ) test_gather_object(lowercase__ ) state.print('testing broadcast' ) test_broadcast(lowercase__ ) state.print('testing pad_across_processes' ) test_pad_across_processes(lowercase__ ) state.print('testing reduce_sum' ) test_reduce_sum(lowercase__ ) state.print('testing reduce_mean' ) 
test_reduce_mean(lowercase__ ) if __name__ == "__main__": main()
304
1
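The merge-sort sample in the record above implements the classic bottom-up (iterative) strategy: merge adjacent runs of doubling width in place until one run covers the whole list. A compact runnable sketch of the same idea, with de-obfuscated and illustrative names:

def iter_merge_sort(items: list) -> list:
    # Bottom-up merge sort: merge runs of width 1, 2, 4, ... in place.
    items = list(items)
    n = len(items)
    width = 1
    while width < n:
        for low in range(0, n, 2 * width):
            mid = min(low + width, n)
            high = min(low + 2 * width, n)
            left, right = items[low:mid], items[mid:high]
            merged = []
            while left and right:
                merged.append((left if left[0] <= right[0] else right).pop(0))
            items[low:high] = merged + left + right
        width *= 2
    return items

assert iter_merge_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]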
import argparse lowerCAmelCase = '''docs/source/_static/js/custom.js''' def _lowerCamelCase( lowercase__ ) -> Any: '''simple docstring''' with open(lowercase__ , encoding='utf-8' , newline='\n' ) as f: __lowercase= f.readlines() __lowercase= 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 __lowercase= F'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += F' "v{version}": "v{version}",\n' with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lowercase__ ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') lowerCAmelCase = parser.parse_args() update_custom_js(args.version)
304
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class A ( A_ ): UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor class A ( A_ , A_ ): UpperCamelCase_ : Dict =1 @register_to_config def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ): # standard deviation of the initial noise distribution __lowercase= sigma_max # setable values __lowercase= None self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): return sample def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps __lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sigma_min if sigma_min is not None else self.config.sigma_min __lowercase= sigma_max if sigma_max is not None else self.config.sigma_max __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCAmelCase , lowerCAmelCase ) __lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) ) __lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) __lowercase= timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) __lowercase= (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __lowercase= timesteps.to(self.discrete_sigmas.device ) __lowercase= self.discrete_sigmas[timesteps].to(sample.device ) __lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device ) __lowercase= torch.zeros_like(lowerCAmelCase ) __lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __lowercase= diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): __lowercase= diffusion.unsqueeze(-1 ) __lowercase= drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __lowercase= randn_tensor( sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype ) __lowercase= sample - drift # subtract 
because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction __lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr __lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __lowercase= step_size.flatten() while len(step_size.shape ) < len(sample.shape ): __lowercase= step_size.unsqueeze(-1 ) __lowercase= sample + step_size * model_output __lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowercase= timesteps.to(original_samples.device ) __lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps] __lowercase= ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None] ) __lowercase= noise + original_samples return noisy_samples def __len__(self ): return self.config.num_train_timesteps
304
1
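The SDE-VE scheduler in the record above builds its discrete noise levels with torch.exp(torch.linspace(log(sigma_min), log(sigma_max), N)), i.e. a geometric grid between sigma_min and sigma_max. A NumPy sketch of just that grid; the numeric values below are the scheduler's own defaults:

import math

import numpy as np

def geometric_sigmas(sigma_min: float, sigma_max: float, num_steps: int) -> np.ndarray:
    # Equivalent to exp(linspace(log(sigma_min), log(sigma_max), N)):
    # noise levels spaced geometrically between the two extremes.
    return np.exp(np.linspace(math.log(sigma_min), math.log(sigma_max), num_steps))

print(geometric_sigmas(0.01, 1348.0, 5))
# five sigmas growing by a constant ratio from 0.01 up to 1348.0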
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: ''' lowerCAmelCase = '''======= >>>>>>> ''' lowerCAmelCase = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] lowerCAmelCase = [ # (pattern, replacement) # Order is important here for some replacements (R'''tfds\.core''', R'''datasets'''), (R'''tf\.io\.gfile\.GFile''', R'''open'''), (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''), (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''), (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''), (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''), (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''), (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (R'''tfds\.''', R'''datasets.'''), (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''), (R'''self\.builder_config''', R'''self.config'''), ] def _lowerCamelCase( lowercase__ ) -> Optional[int]: '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class A ( A_ ): @staticmethod def _A (lowerCAmelCase ): __lowercase= parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' ) train_parser.set_defaults(func=lowerCAmelCase ) def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= get_logger('datasets-cli/converting' ) __lowercase= tfds_path __lowercase= datasets_directory def _A (self ): if os.path.isdir(self._tfds_path ): __lowercase= os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __lowercase= os.path.dirname(self._tfds_path ) else: raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' 
) __lowercase= os.path.abspath(self._datasets_directory ) self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' ) __lowercase= [] __lowercase= [] __lowercase= {} if os.path.isdir(self._tfds_path ): __lowercase= os.listdir(lowerCAmelCase ) else: __lowercase= [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'Looking at file {f_name}' ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file' ) continue with open(lowerCAmelCase , encoding='utf-8' ) as f: __lowercase= f.readlines() __lowercase= [] __lowercase= False __lowercase= False __lowercase= [] for line in lines: __lowercase= line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __lowercase= 'import datasets\n' elif "import tensorflow" in out_line: # order is important here __lowercase= '' continue elif "from absl import logging" in out_line: __lowercase= 'from datasets import logging\n' elif "getLogger" in out_line: __lowercase= out_line.replace('getLogger' , 'get_logger' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __lowercase= True __lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' ) out_lines.append(lowerCAmelCase ) out_lines.append(lowerCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: __lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) ) __lowercase= 'from . import ' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'Error converting {out_line.strip()}' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __lowercase= True out_lines.append(lowerCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __lowercase= f_name.replace('.py' , '' ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) self._logger.info(f'Adding directory {output_dir}' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCAmelCase ) if needs_manual_update: with_manual_update.append(lowerCAmelCase ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.writelines(lowerCAmelCase ) self._logger.info(f'Converted in {output_file}' ) for utils_file in utils_files: try: __lowercase= os.path.basename(lowerCAmelCase ) __lowercase= imports_to_builder_map[f_name.replace('.py' , '' )] self._logger.info(f'Moving {dest_folder} to {utils_file}' ) shutil.copy(lowerCAmelCase , lowerCAmelCase ) except KeyError: self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' 
) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
304
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCAmelCase = False class A ( unittest.TestCase ): pass @nightly @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase ) __lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= generator.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 'cyberpunk 2077' __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= 'A painting of a squirrel eating a burger ' __lowercase= torch.manual_seed(0 ) __lowercase= pipe.text_to_image( prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
304
1
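The conversion command in the record above rewrites TFDS sources by applying the ordered TO_CONVERT regex pairs to each line. A self-contained sketch of that rewrite step; only a few of the pairs are reproduced, and the ordering matters because the generic "tfds." catch-all must run after the more specific rules:

import re

# Ordered (pattern, replacement) pairs, mirroring a subset of TO_CONVERT above.
TO_CONVERT = [
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.", r"datasets."),
]

def convert_line(line: str) -> str:
    # Apply every rewrite in order; specific patterns before the catch-all.
    for pattern, replacement in TO_CONVERT:
        line = re.sub(pattern, replacement, line)
    return line

print(convert_line("text = tfds.features.Text()"))
# -> "text = datasets.Value('string')"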
import operator


def _lowerCamelCase( lowercase__ , lowercase__ = False , lowercase__ = None ) -> list:
    '''simple docstring'''
    __lowercase= operator.lt if reverse else operator.gt
    __lowercase= solution or []

    if not arr:
        return solution

    __lowercase= [arr.pop(0 )]
    for i, item in enumerate(lowercase__ ):
        if _operator(lowercase__ , sublist[-1] ):
            sublist.append(lowercase__ )
            arr.pop(lowercase__ )

    # merging sublist into solution list
    if not solution:
        solution.extend(lowercase__ )
    else:
        while sublist:
            __lowercase= sublist.pop(0 )
            for i, xx in enumerate(lowercase__ ):
                if not _operator(lowercase__ , lowercase__ ):
                    solution.insert(lowercase__ , lowercase__ )
                    break
            else:
                solution.append(lowercase__ )

    strand_sort(lowercase__ , lowercase__ , lowercase__ )
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
304
1
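The strand-sort sample in the record above is recursive: each call peels one monotone "strand" off the input and merges it into the running solution. An equivalent iterative sketch (names illustrative) makes that structure easier to follow; it passes the same two assertions the record ships with:

import operator

def strand_sort(arr: list, reverse: bool = False) -> list:
    # Repeatedly pull a monotone strand out of the input and merge it
    # into the solution until the input is exhausted.
    cmp = operator.lt if reverse else operator.gt
    arr = list(arr)
    solution = []
    while arr:
        strand = [arr.pop(0)]
        # Collect every remaining item that extends the strand.
        for item in arr[:]:
            if not cmp(strand[-1], item):
                strand.append(item)
                arr.remove(item)
        # Merge the strand into the already-sorted solution.
        merged = []
        while strand and solution:
            merged.append(strand.pop(0) if not cmp(strand[0], solution[0]) else solution.pop(0))
        solution = merged + strand + solution
    return solution

assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]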
import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A ( A_ , unittest.TestCase ): UpperCamelCase_ : int =DDIMPipeline UpperCamelCase_ : Dict =UNCONDITIONAL_IMAGE_GENERATION_PARAMS UpperCamelCase_ : Optional[int] =PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''latents''', '''callback''', '''callback_steps''', } UpperCamelCase_ : Union[str, Any] =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS UpperCamelCase_ : Any =False def _A (self ): torch.manual_seed(0 ) __lowercase= UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) __lowercase= DDIMScheduler() __lowercase= {'unet': unet, 'scheduler': scheduler} return components def _A (self , lowerCAmelCase , lowerCAmelCase=0 ): if str(lowerCAmelCase ).startswith('mps' ): __lowercase= torch.manual_seed(lowerCAmelCase ) else: __lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) __lowercase= { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def _A (self ): __lowercase= 'cpu' __lowercase= self.get_dummy_components() __lowercase= self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= self.get_dummy_inputs(lowerCAmelCase ) __lowercase= pipe(**lowerCAmelCase ).images __lowercase= image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 3_2, 3_2, 3) ) __lowercase= np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) __lowercase= np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCAmelCase , 1E-3 ) def _A (self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def _A (self ): super().test_save_load_local(expected_max_difference=3E-3 ) def _A (self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def _A (self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): __lowercase= 'google/ddpm-cifar10-32' __lowercase= UNetaDModel.from_pretrained(lowerCAmelCase ) __lowercase= DDIMScheduler() __lowercase= DDIMPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase ) ddim.to(lowerCAmelCase ) ddim.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= torch.manual_seed(0 ) __lowercase= ddim(generator=lowerCAmelCase , eta=0.0 , output_type='numpy' ).images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) __lowercase= np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): __lowercase= 'google/ddpm-ema-bedroom-256' __lowercase= UNetaDModel.from_pretrained(lowerCAmelCase ) __lowercase= DDIMScheduler.from_pretrained(lowerCAmelCase ) __lowercase= DDIMPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase ) ddpm.to(lowerCAmelCase ) 
ddpm.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= torch.manual_seed(0 ) __lowercase= ddpm(generator=lowerCAmelCase , output_type='numpy' ).images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) __lowercase= np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
304
import math
from datetime import datetime, timedelta


def _lowerCamelCase( lowercase__ ) -> datetime:
    '''simple docstring'''
    __lowercase= year % 1_9
    __lowercase= year % 4
    __lowercase= year % 7
    __lowercase= math.floor(year / 1_0_0 )
    __lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    __lowercase= leap_day_inhibits / 4
    __lowercase= (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    __lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    __lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0

    # PHM -> Paschal Full Moon
    __lowercase= (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_8 )
    else:
        return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


if __name__ == "__main__":
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
        print(F'Easter in {year} {tense} {gauss_easter(year)}')
304
1
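The Gauss Easter computation in the record above is fully determined by the stored arithmetic, so restoring the variable names from the later uses yields this runnable sketch:

import math
from datetime import datetime, timedelta

def gauss_easter(year: int) -> datetime:
    # Gauss's Easter algorithm: locate the Paschal full moon, then the
    # following Sunday, with the two classical April 18/19 exceptions.
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    if days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))

print(gauss_easter(2023))  # 2023-04-09 00:00:00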
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger('''transformers.models.speecht5''') def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str: '''simple docstring''' hf_model.apply_weight_norm() __lowercase= checkpoint['input_conv.weight_g'] __lowercase= checkpoint['input_conv.weight_v'] __lowercase= checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): __lowercase= checkpoint[F'upsamples.{i}.1.weight_g'] __lowercase= checkpoint[F'upsamples.{i}.1.weight_v'] __lowercase= checkpoint[F'upsamples.{i}.1.bias'] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): __lowercase= checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g'] __lowercase= checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v'] __lowercase= checkpoint[F'blocks.{i}.convs1.{j}.1.bias'] __lowercase= checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g'] __lowercase= checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v'] __lowercase= checkpoint[F'blocks.{i}.convs2.{j}.1.bias'] __lowercase= checkpoint['output_conv.1.weight_g'] __lowercase= checkpoint['output_conv.1.weight_v'] __lowercase= checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , ) -> int: '''simple docstring''' if config_path is not None: __lowercase= SpeechTaHifiGanConfig.from_pretrained(lowercase__ ) else: __lowercase= SpeechTaHifiGanConfig() __lowercase= SpeechTaHifiGan(lowercase__ ) __lowercase= torch.load(lowercase__ ) load_weights(orig_checkpoint['model']['generator'] , lowercase__ , lowercase__ ) __lowercase= np.load(lowercase__ ) __lowercase= stats[0].reshape(-1 ) __lowercase= stats[1].reshape(-1 ) __lowercase= torch.from_numpy(lowercase__ ).float() __lowercase= torch.from_numpy(lowercase__ ).float() model.save_pretrained(lowercase__ ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(lowercase__ ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowerCAmelCase = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
304
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''blenderbot-small''' UpperCamelCase_ : Optional[Any] =['''past_key_values'''] UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ): __lowercase= vocab_size __lowercase= max_position_embeddings __lowercase= d_model __lowercase= encoder_ffn_dim __lowercase= encoder_layers __lowercase= encoder_attention_heads __lowercase= decoder_ffn_dim __lowercase= decoder_layers __lowercase= decoder_attention_heads __lowercase= dropout __lowercase= attention_dropout __lowercase= activation_dropout __lowercase= activation_function __lowercase= init_std __lowercase= encoder_layerdrop __lowercase= decoder_layerdrop __lowercase= use_cache __lowercase= encoder_layers __lowercase= scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , ) class A ( A_ ): @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase= {0: 'batch'} __lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __lowercase= {0: 'batch', 1: 'decoder_sequence'} __lowercase= {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
__lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} else: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super().outputs else: __lowercase= super(lowerCAmelCase , self ).outputs if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Generate decoder inputs __lowercase= seq_length if not self.use_past else 1 __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __lowercase= dict(**lowerCAmelCase , **lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape __lowercase= common_inputs['decoder_input_ids'].shape[1] __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= decoder_seq_length + 3 __lowercase= ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase= torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 ) __lowercase= [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase, __lowercase= self.num_layers __lowercase= min(lowerCAmelCase , lowerCAmelCase ) __lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers __lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), ) ) # TODO: test this. 
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowerCAmelCase , lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowercase= seqlen + 2 __lowercase, __lowercase= self.num_layers __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= common_inputs['attention_mask'].dtype __lowercase= torch.cat( [common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 ) __lowercase= [ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase ) ] return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase ) __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: __lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) elif self.task == "causal-lm": __lowercase= self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) else: __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: __lowercase= 
super(lowerCAmelCase , self )._flatten_past_key_values_( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
304
1
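The HiFi-GAN converter in the record above copies weight-norm parameters (weight_g/weight_v pairs) from checkpoint keys to model attributes one by one. As a loose, hypothetical illustration of the underlying rename-and-copy idea only (remap_state_dict and the "upsampler" target name are invented for this sketch, not the converter's real API):

import re

def remap_state_dict(state_dict: dict, patterns: list) -> dict:
    # Rename checkpoint keys with ordered (regex, replacement) pairs,
    # leaving the tensors themselves untouched.
    remapped = {}
    for key, value in state_dict.items():
        for pattern, replacement in patterns:
            key = re.sub(pattern, replacement, key)
        remapped[key] = value
    return remapped

ckpt = {"input_conv.weight_g": 1, "upsamples.0.1.weight_v": 2}
print(remap_state_dict(ckpt, [(r"upsamples\.(\d+)\.1\.", r"upsampler.\1.")]))
# -> {'input_conv.weight_g': 1, 'upsampler.0.weight_v': 2}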
import csv import tweepy # Twitter API credentials lowerCAmelCase = '''''' lowerCAmelCase = '''''' lowerCAmelCase = '''''' lowerCAmelCase = '''''' def _lowerCamelCase( lowercase__ ) -> None: '''simple docstring''' __lowercase= tweepy.OAuthHandler(lowercase__ , lowercase__ ) auth.set_access_token(lowercase__ , lowercase__ ) __lowercase= tweepy.API(lowercase__ ) # initialize a list to hold all the tweepy Tweets __lowercase= [] # make initial request for most recent tweets (200 is the maximum allowed count) __lowercase= api.user_timeline(screen_name=lowercase__ , count=2_0_0 ) # save most recent tweets alltweets.extend(lowercase__ ) # save the id of the oldest tweet less one __lowercase= alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase__ ) > 0: print(F'getting tweets before {oldest}' ) # all subsequent requests use the max_id param to prevent duplicates __lowercase= api.user_timeline( screen_name=lowercase__ , count=2_0_0 , max_id=lowercase__ ) # save most recent tweets alltweets.extend(lowercase__ ) # update the id of the oldest tweet less one __lowercase= alltweets[-1].id - 1 print(F'...{len(lowercase__ )} tweets downloaded so far' ) # transform the tweepy tweets into a 2D array that will populate the csv __lowercase= [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F'new_{screen_name}_tweets.csv' , 'w' ) as f: __lowercase= csv.writer(lowercase__ ) writer.writerow(['id', 'created_at', 'text'] ) writer.writerows(lowercase__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
304
from math import factorial, radians


def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
    '''simple docstring'''
    __lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    __lowercase= radians(lowercase__ )
    __lowercase= angle_in_radians
    __lowercase= 3
    __lowercase= -1

    for _ in range(lowercase__ ):
        result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
        __lowercase= -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(lowercase__ , lowercase__ )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
304
1
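The sine sample in the record above first reduces the angle modulo 360 degrees, converts it to radians, and then truncates the Maclaurin series after `accuracy` terms:

\sin x \;=\; \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)!}\, x^{2k+1} \;=\; x - \frac{x^3}{3!} + \frac{x^5}{5!} - \cdots

The loop seeds the result with the first term x, then alternates the sign while stepping the exponent by 2, which is exactly why the exponent starts at 3 and the coefficient starts at -1.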
from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Tuple =['''pixel_values'''] def __init__(self , lowerCAmelCase = True , lowerCAmelCase = 3_2 , lowerCAmelCase=PILImageResampling.BILINEAR , lowerCAmelCase = True , **lowerCAmelCase , ): __lowercase= do_resize __lowercase= do_rescale __lowercase= size_divisor __lowercase= resample super().__init__(**lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase ): __lowercase, __lowercase= get_image_size(lowerCAmelCase ) # Rounds the height and width down to the closest multiple of size_divisor __lowercase= height // size_divisor * size_divisor __lowercase= width // size_divisor * size_divisor __lowercase= resize(lowerCAmelCase , (new_h, new_w) , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) return image def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase ): return rescale(image=lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase=None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ): __lowercase= do_resize if do_resize is not None else self.do_resize __lowercase= do_rescale if do_rescale is not None else self.do_rescale __lowercase= size_divisor if size_divisor is not None else self.size_divisor __lowercase= resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('size_divisor is required for resizing' ) __lowercase= make_list_of_images(lowerCAmelCase ) if not valid_images(lowerCAmelCase ): raise ValueError('Invalid image(s)' ) # All transformations expect numpy arrays. __lowercase= [to_numpy_array(lowerCAmelCase ) for img in images] if do_resize: __lowercase= [self.resize(lowerCAmelCase , size_divisor=lowerCAmelCase , resample=lowerCAmelCase ) for image in images] if do_rescale: __lowercase= [self.rescale(lowerCAmelCase , scale=1 / 2_5_5 ) for image in images] __lowercase= [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images] __lowercase= {'pixel_values': images} return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
304
lowerCAmelCase = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] lowerCAmelCase = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 
8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
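# The lists above read like hand-picked diffusion timestep schedules: strictly
# decreasing, starting just below 1000 and ending at 0. A minimal sketch of the
# invariants such a schedule should satisfy (the helper name and the 1000-step
# training horizon are illustrative assumptions, not taken from this file):
def _validate_timestep_schedule(timesteps, num_train_timesteps=1_0_0_0):
    assert timesteps[0] < num_train_timesteps, 'first timestep out of range'
    assert timesteps[-1] == 0, 'schedule must end at timestep 0'
    assert all(a > b for a, b in zip(timesteps, timesteps[1:])), 'schedule must be strictly decreasing'

_validate_timestep_schedule([9_9_9, 5_0_0, 0])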
304
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
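# `_LazyModule` above defers the heavy torch/tf/flax imports until an exported
# name is first accessed. A minimal sketch of that idea using a PEP 562
# module-level `__getattr__` -- not transformers' actual implementation, and the
# structure dict here is an illustrative stand-in:
import importlib

_lazy_structure = {'modeling_vision_encoder_decoder': ['VisionEncoderDecoderModel']}
_attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        # import the submodule only now, on first attribute access
        submodule = importlib.import_module('.' + _attr_to_module[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')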
304
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    '''Apply the ReLU activation element-wise: max(0, x).'''
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
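# Two companion activations in the same one-liner style -- illustrative
# additions, not part of the original snippet:
def relu_derivative(vector):
    # subgradient of ReLU: 1 where x > 0, else 0
    return (np.asarray(vector) > 0).astype(float)

def leaky_relu(vector, alpha=0.01):
    # small negative slope instead of a hard zero
    arr = np.asarray(vector)
    return np.where(arr > 0, arr, alpha * arr)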
304
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['''YolosFeatureExtractor'''] lowerCAmelCase = ['''YolosImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''YolosForObjectDetection''', '''YolosModel''', '''YolosPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
304
def solution(power: int = 1_0_0_0) -> int:
    '''Return the sum of the decimal digits of 2**power.'''
    number = 2**power
    digits = str(number)
    sum_of_num = 0
    for digit in digits:
        sum_of_num += int(digit)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
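# The same digit sum without the string round-trip, using divmod -- an
# illustrative alternative, not part of the original solution:
def solution_arithmetic(power: int = 1_0_0_0) -> int:
    number = 2**power
    total = 0
    while number:
        number, digit = divmod(number, 1_0)  # peel off the last decimal digit
        total += digit
    return total

assert solution_arithmetic(1_5) == sum(int(d) for d in str(2**1_5))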
304
1
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A ( enum.Enum ): UpperCamelCase_ : Dict =0 UpperCamelCase_ : Any =1 UpperCamelCase_ : List[str] =2 @add_end_docstrings(A_ ) class A ( A_ ): UpperCamelCase_ : Tuple =''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__(self , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(*lowerCAmelCase , **lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowercase= None if self.model.config.prefix is not None: __lowercase= self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowercase= self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params ) __lowercase= {**self._preprocess_params, **preprocess_params} __lowercase= {**self._forward_params, **forward_params} def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): __lowercase= {} if prefix is not None: __lowercase= prefix if prefix: __lowercase= self.tokenizer( lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ' [None, \'hole\']' ) __lowercase= handle_long_generation preprocess_params.update(lowerCAmelCase ) __lowercase= generate_kwargs __lowercase= {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.TENSORS if return_type is not None: __lowercase= return_type if clean_up_tokenization_spaces is not None: __lowercase= clean_up_tokenization_spaces if stop_sequence is not None: __lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) if len(lowerCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowercase= stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _A (self , *lowerCAmelCase , **lowerCAmelCase ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase ) def __call__(self , lowerCAmelCase , **lowerCAmelCase ): return super().__call__(lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prompt_text if handle_long_generation == "hole": __lowercase= inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowercase= generate_kwargs['max_new_tokens'] else: __lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowercase= self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowercase= inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowercase= inputs['attention_mask'][:, -keep_length:] return inputs def _A (self , lowerCAmelCase , **lowerCAmelCase ): __lowercase= model_inputs['input_ids'] __lowercase= model_inputs.get('attention_mask' , lowerCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: __lowercase= None __lowercase= None __lowercase= 1 else: __lowercase= input_ids.shape[0] __lowercase= model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowercase= generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowercase= 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowercase= 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase ) __lowercase= generated_sequence.shape[0] if self.framework == "pt": __lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ): __lowercase= model_outputs['generated_sequence'][0] __lowercase= model_outputs['input_ids'] __lowercase= model_outputs['prompt_text'] __lowercase= generated_sequence.numpy().tolist() __lowercase= [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowercase= {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowercase= self.tokenizer.decode( lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowercase= 0 else: __lowercase= len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: __lowercase= prompt_text + text[prompt_length:] else: __lowercase= text[prompt_length:] __lowercase= {'generated_text': all_text} records.append(lowerCAmelCase ) return records
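# The class above is the backend of `pipeline('text-generation')`. A usage
# sketch rather than a test -- it downloads a checkpoint, and 'gpt2' is assumed
# here as a convenient public model, not mandated by this file:
from transformers import pipeline

generator = pipeline('text-generation', model='gpt2')
outputs = generator('Hello, I am', max_new_tokens=1_0, return_full_text=False)
print(outputs[0]['generated_text'])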
304
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int: '''simple docstring''' __lowercase= {} if train_file is not None: __lowercase= [train_file] if eval_file is not None: __lowercase= [eval_file] if test_file is not None: __lowercase= [test_file] __lowercase= datasets.load_dataset('csv' , data_files=lowercase__ ) __lowercase= list(ds[list(files.keys() )[0]].features.keys() ) __lowercase= features_name.pop(lowercase__ ) __lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) ) __lowercase= {label: i for i, label in enumerate(lowercase__ )} __lowercase= tokenizer.model_input_names __lowercase= {} if len(lowercase__ ) == 1: for k in files.keys(): __lowercase= ds[k].map( lambda lowercase__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , ) elif len(lowercase__ ) == 2: for k in files.keys(): __lowercase= ds[k].map( lambda lowercase__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCAmelCase = logging.getLogger(__name__) @dataclass class A : UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains 
the label'''} ) UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} ) UpperCamelCase_ : int =field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def _lowerCamelCase( ) -> Optional[Any]: '''simple docstring''' __lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info( F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' F'16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowercase= AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowercase, __lowercase, __lowercase, __lowercase= get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __lowercase= AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __lowercase= TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , ) def compute_metrics(lowercase__ ) -> Dict: __lowercase= np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __lowercase= TFTrainer( model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase= {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowercase= trainer.evaluate() __lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' ) with open(lowercase__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) results.update(lowercase__ ) return results if __name__ == "__main__": main()
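# The `compute_metrics` closure above is plain argmax accuracy. A standalone
# check of that arithmetic on synthetic logits (not real model output):
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
label_ids = np.array([1, 0, 0])
preds = np.argmax(logits, axis=1)
assert {'acc': (preds == label_ids).mean()} == {'acc': 2 / 3}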
304
1
import functools


def min_distance_top_down(word_a: str, word_b: str) -> int:
    '''Compute the Levenshtein (edit) distance between two words, top-down with memoisation.'''
    len_word_a = len(word_a)
    len_word_b = len(word_b)

    @functools.cache
    def min_distance(index_a: int, index_b: int) -> int:
        # first word exhausted - the remaining letters of the second word must be inserted
        if index_a >= len_word_a:
            return len_word_b - index_b
        # second word exhausted - the remaining letters of the first word must be deleted
        if index_b >= len_word_b:
            return len_word_a - index_a
        diff = int(word_a[index_a] != word_b[index_b])  # 0 if current letters match
        return min(
            1 + min_distance(index_a + 1, index_b),  # delete from word_a
            1 + min_distance(index_a, index_b + 1),  # insert into word_a
            diff + min_distance(index_a + 1, index_b + 1),  # substitute / keep
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
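# The memoised recursion above is Levenshtein distance. An equivalent
# bottom-up DP table, added for illustration:
def min_distance_bottom_up(word_a: str, word_b: str) -> int:
    rows, cols = len(word_a) + 1, len(word_b) + 1
    dp = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        dp[i][0] = i  # delete all of word_a's prefix
    for j in range(cols):
        dp[0][j] = j  # insert all of word_b's prefix
    for i in range(1, rows):
        for j in range(1, cols):
            diff = int(word_a[i - 1] != word_b[j - 1])
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + diff)
    return dp[-1][-1]

assert min_distance_bottom_up('kitten', 'sitting') == 3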
304
import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A ( A_ ): def _A (self ): __lowercase= self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= patch_sizes __lowercase= patch_stride __lowercase= patch_padding __lowercase= is_training __lowercase= use_labels __lowercase= num_labels __lowercase= num_channels __lowercase= embed_dim __lowercase= num_heads __lowercase= stride_kv __lowercase= depth __lowercase= cls_token __lowercase= attention_drop_rate __lowercase= initializer_range __lowercase= layer_norm_eps def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.num_labels ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= CvtModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= (self.image_size, self.image_size) __lowercase, __lowercase= image_size[0], image_size[1] for i in range(len(self.depth ) ): __lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= CvtForImageClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else () UpperCamelCase_ : List[str] =( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) UpperCamelCase_ : str =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Any =False UpperCamelCase_ : Union[str, Any] =False UpperCamelCase_ : Tuple =False def _A (self ): __lowercase= CvtModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 ) def _A (self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _A (self ): return @unittest.skip(reason='Cvt does not output attentions' ) def _A (self ): pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def _A (self ): pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def _A (self ): pass def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def _A (self ): def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) ) __lowercase= outputs.hidden_states __lowercase= len(self.model_tester.depth ) self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _A (self ): pass @slow def _A (self ): for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= CvtModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' __lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def _A (self ): return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _A (self ): __lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase= model(**lowerCAmelCase ) # verify the logits __lowercase= torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase ) __lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
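# The spatial sizes asserted in the model tester above follow the standard
# convolution output formula floor((size + 2 * padding - kernel) / stride) + 1.
# A standalone check with the tester's first stage (kernel 7, stride 4,
# padding 2, image size 64):
from math import floor

def conv_output_size(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride) + 1

assert conv_output_size(6_4, kernel=7, stride=4, padding=2) == 1_6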
304
1
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        '''Encrypt each character as c = (ord(char) + k) * k with a fresh random key k.'''
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 3_0_0)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        '''Invert encrypt: p = (c - k**2) // k.'''
        plain = []
        for i in range(len(key)):
            p = (cipher[i] - key[i] ** 2) // key[i]
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt('''Hello''')
    print(c, k)
    print(Onepad().decrypt(c, k))
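# A quick round-trip property check for the cipher above: the random key
# stream makes the ciphertext differ between runs, but decryption must always
# invert encryption exactly:
for _ in range(1_0):
    cipher_text, key_stream = Onepad.encrypt('Hello, world!')
    assert Onepad.decrypt(cipher_text, key_stream) == 'Hello, world!'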
304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
304
1
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) # TODO Update this lowerCAmelCase = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class A ( A_ ): UpperCamelCase_ : Any ='''esm''' def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=1_0_2_6 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): super().__init__(pad_token_id=lowerCAmelCase , mask_token_id=lowerCAmelCase , **lowerCAmelCase ) __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= initializer_range __lowercase= layer_norm_eps __lowercase= position_embedding_type __lowercase= use_cache __lowercase= emb_layer_norm_before __lowercase= token_dropout __lowercase= is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' ) __lowercase= EsmFoldConfig() elif isinstance(lowerCAmelCase , lowerCAmelCase ): __lowercase= EsmFoldConfig(**lowerCAmelCase ) __lowercase= esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) __lowercase= get_default_vocab_list() else: __lowercase= vocab_list else: __lowercase= None __lowercase= None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , lowerCAmelCase ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' 
) def _A (self ): __lowercase= super().to_dict() if isinstance(self.esmfold_config , lowerCAmelCase ): __lowercase= self.esmfold_config.to_dict() return output @dataclass class A : UpperCamelCase_ : str =None UpperCamelCase_ : bool =True UpperCamelCase_ : bool =False UpperCamelCase_ : bool =False UpperCamelCase_ : bool =False UpperCamelCase_ : float =0 UpperCamelCase_ : bool =True UpperCamelCase_ : bool =False UpperCamelCase_ : int =128 UpperCamelCase_ : "TrunkConfig" =None def _A (self ): if self.trunk is None: __lowercase= TrunkConfig() elif isinstance(self.trunk , lowerCAmelCase ): __lowercase= TrunkConfig(**self.trunk ) def _A (self ): __lowercase= asdict(self ) __lowercase= self.trunk.to_dict() return output @dataclass class A : UpperCamelCase_ : int =48 UpperCamelCase_ : int =1_024 UpperCamelCase_ : int =128 UpperCamelCase_ : int =32 UpperCamelCase_ : int =32 UpperCamelCase_ : int =32 UpperCamelCase_ : float =0 UpperCamelCase_ : float =0 UpperCamelCase_ : bool =False UpperCamelCase_ : int =4 UpperCamelCase_ : Optional[int] =128 UpperCamelCase_ : "StructureModuleConfig" =None def _A (self ): if self.structure_module is None: __lowercase= StructureModuleConfig() elif isinstance(self.structure_module , lowerCAmelCase ): __lowercase= StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' f' {self.sequence_state_dim} and {self.sequence_state_dim}.' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' ) __lowercase= self.sequence_state_dim // self.sequence_head_width __lowercase= self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' ) if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' 
) def _A (self ): __lowercase= asdict(self ) __lowercase= self.structure_module.to_dict() return output @dataclass class A : UpperCamelCase_ : int =384 UpperCamelCase_ : int =128 UpperCamelCase_ : int =16 UpperCamelCase_ : int =128 UpperCamelCase_ : int =12 UpperCamelCase_ : int =4 UpperCamelCase_ : int =8 UpperCamelCase_ : float =0.1 UpperCamelCase_ : int =8 UpperCamelCase_ : int =1 UpperCamelCase_ : int =2 UpperCamelCase_ : int =7 UpperCamelCase_ : int =10 UpperCamelCase_ : float =1e-8 UpperCamelCase_ : float =1e5 def _A (self ): return asdict(self ) def _lowerCamelCase( ) -> int: '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
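# The TrunkConfig validation above requires state_dim == num_heads * head_width,
# where num_heads = state_dim // head_width. The defaults satisfy it -- a
# standalone arithmetic check using the default values from this file:
sequence_state_dim, sequence_head_width = 1_0_2_4, 3_2
pairwise_state_dim, pairwise_head_width = 1_2_8, 3_2
assert sequence_state_dim == (sequence_state_dim // sequence_head_width) * sequence_head_width  # 32 heads
assert pairwise_state_dim == (pairwise_state_dim // pairwise_head_width) * pairwise_head_width  # 4 heads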
304
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: ''' lowerCAmelCase = '''======= >>>>>>> ''' lowerCAmelCase = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] lowerCAmelCase = [ # (pattern, replacement) # Order is important here for some replacements (R'''tfds\.core''', R'''datasets'''), (R'''tf\.io\.gfile\.GFile''', R'''open'''), (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''), (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''), (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''), (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''), (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''), (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (R'''tfds\.''', R'''datasets.'''), (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''), (R'''self\.builder_config''', R'''self.config'''), ] def _lowerCamelCase( lowercase__ ) -> Optional[int]: '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class A ( A_ ): @staticmethod def _A (lowerCAmelCase ): __lowercase= parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' ) train_parser.set_defaults(func=lowerCAmelCase ) def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= get_logger('datasets-cli/converting' ) __lowercase= tfds_path __lowercase= datasets_directory def _A (self ): if os.path.isdir(self._tfds_path ): __lowercase= os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __lowercase= os.path.dirname(self._tfds_path ) else: raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' 
) __lowercase= os.path.abspath(self._datasets_directory ) self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' ) __lowercase= [] __lowercase= [] __lowercase= {} if os.path.isdir(self._tfds_path ): __lowercase= os.listdir(lowerCAmelCase ) else: __lowercase= [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'Looking at file {f_name}' ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file' ) continue with open(lowerCAmelCase , encoding='utf-8' ) as f: __lowercase= f.readlines() __lowercase= [] __lowercase= False __lowercase= False __lowercase= [] for line in lines: __lowercase= line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __lowercase= 'import datasets\n' elif "import tensorflow" in out_line: # order is important here __lowercase= '' continue elif "from absl import logging" in out_line: __lowercase= 'from datasets import logging\n' elif "getLogger" in out_line: __lowercase= out_line.replace('getLogger' , 'get_logger' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __lowercase= True __lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' ) out_lines.append(lowerCAmelCase ) out_lines.append(lowerCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: __lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) ) __lowercase= 'from . import ' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'Error converting {out_line.strip()}' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __lowercase= True out_lines.append(lowerCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __lowercase= f_name.replace('.py' , '' ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) self._logger.info(f'Adding directory {output_dir}' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCAmelCase ) if needs_manual_update: with_manual_update.append(lowerCAmelCase ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.writelines(lowerCAmelCase ) self._logger.info(f'Converted in {output_file}' ) for utils_file in utils_files: try: __lowercase= os.path.basename(lowerCAmelCase ) __lowercase= imports_to_builder_map[f_name.replace('.py' , '' )] self._logger.info(f'Moving {dest_folder} to {utils_file}' ) shutil.copy(lowerCAmelCase , lowerCAmelCase ) except KeyError: self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' 
) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
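# The TO_CONVERT table above is applied line by line with `re.sub`. A
# standalone demo on one representative line, with two of the patterns
# reproduced inline so the snippet runs on its own:
import re

line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
for pattern, replacement in [
    (r'tfds\.features\.Text\(\)', r"datasets.Value('string')"),
    (r'tfds\.features\.FeaturesDict\(', r'dict('),
]:
    line = re.sub(pattern, replacement, line)
print(line)  # features=dict({'text': datasets.Value('string')})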
304
1
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=3 , lowerCAmelCase=3_2 , lowerCAmelCase=3 , lowerCAmelCase=1_0 , lowerCAmelCase=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase=[1, 1, 2, 1] , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=3 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= num_channels __lowercase= embeddings_size __lowercase= hidden_sizes __lowercase= depths __lowercase= is_training __lowercase= use_labels __lowercase= hidden_act __lowercase= num_labels __lowercase= scope __lowercase= len(lowerCAmelCase ) def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.num_labels ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= TFRegNetModel(config=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , training=lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= TFRegNetForImageClassification(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'pixel_values': pixel_values} return config, inputs_dict @require_tf class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Dict =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () UpperCamelCase_ : Union[str, Any] =( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) UpperCamelCase_ : Tuple =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Tuple =False UpperCamelCase_ : int =False UpperCamelCase_ : Dict =False def _A (self ): __lowercase= TFRegNetModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase ) def _A (self ): return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def _A (self ): pass @unittest.skipIf( not is_tf_available() or 
len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def _A (self ): super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def _A (self ): pass def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def _A (self ): def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= model_class(lowerCAmelCase ) __lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) , training=lowerCAmelCase ) __lowercase= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase= self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() __lowercase= ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: __lowercase= layer_type __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase={} ): __lowercase= model(lowerCAmelCase , return_dict=lowerCAmelCase , **lowerCAmelCase ) __lowercase= model(lowerCAmelCase , return_dict=lowerCAmelCase , **lowerCAmelCase ).to_tuple() def recursive_check(lowerCAmelCase , lowerCAmelCase ): if isinstance(lowerCAmelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase , lowerCAmelCase ): recursive_check(lowerCAmelCase , lowerCAmelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowerCAmelCase , lowerCAmelCase ) ) , msg=( 'Tuple and dict output are not equal. 
Difference:' f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}' ) , ) recursive_check(lowerCAmelCase , lowerCAmelCase ) for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {'output_hidden_states': True} ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {'output_hidden_states': True} ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= TFRegNetModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def _lowerCamelCase( ) -> Optional[Any]: '''simple docstring''' __lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def _A (self ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _A (self ): __lowercase= TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=lowerCAmelCase , return_tensors='tf' ) # forward pass __lowercase= model(**lowerCAmelCase , training=lowerCAmelCase ) # verify the logits __lowercase= tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase ) __lowercase= tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 )
304
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCAmelCase = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''albert''' def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ): super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase ) __lowercase= vocab_size __lowercase= embedding_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_hidden_groups __lowercase= num_attention_heads __lowercase= inner_group_num __lowercase= hidden_act __lowercase= intermediate_size __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= initializer_range __lowercase= layer_norm_eps __lowercase= classifier_dropout_prob __lowercase= position_embedding_type class A ( A_ ): @property def _A (self ): if self.task == "multiple-choice": __lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'} else: __lowercase= {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
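# ALBERT's factorized embeddings (embedding_size 128 vs hidden_size 4096 in the
# defaults above) shrink the vocabulary projection. A back-of-envelope
# parameter-count comparison using this config's defaults:
vocab_size, embedding_size, hidden_size = 3_0_0_0_0, 1_2_8, 4_0_9_6
factorized = vocab_size * embedding_size + embedding_size * hidden_size
unfactorized = vocab_size * hidden_size
assert factorized < unfactorized  # 4,364,288 vs 122,880,000 parameters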
304
1
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P

# Sentinel marking parameters that no partition rule matched
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    '''Return True if the regexes in qs match any contiguous window of ks.'''
    # compile regexes and force a complete match with a trailing anchor
    qts = tuple(re.compile(x + '$') for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
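# A standalone demo of `_match` above: the rule window slides over the
# flattened parameter path, so a rule can match a suffix of the key tuple:
assert _match(('attention', 'out_proj', 'kernel'),
              ('transformer', 'h', '0', 'attention', 'out_proj', 'kernel'))
assert not _match(('mlp', 'c_fc', 'kernel'),
                  ('transformer', 'h', '0', 'attention', 'out_proj', 'kernel'))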
304
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' __lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' ) __lowercase= transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) __lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ ) return image def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' if "visual_encoder" in key: __lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ ) if "blocks" in key: __lowercase= re.sub(R'blocks' , 'layers' , lowercase__ ) if "attn" in key: __lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ ) if "norm1" in key: __lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ ) if "norm2" in key: __lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ ) if "encoder.norm" in key: __lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ ) if "encoder.patch_embed.proj" in key: __lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ ) if "encoder.pos_embed" in key: __lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ ) if "encoder.cls_token" in key: __lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ ) if "self_attn" in key: __lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ ) return key @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int: '''simple docstring''' if config_path is not None: __lowercase= BlipConfig.from_pretrained(lowercase__ ) else: __lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} ) __lowercase= BlipForConditionalGeneration(lowercase__ ).eval() __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' __lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' ) __lowercase= pt_model.eval() __lowercase= pt_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value hf_model.load_state_dict(lowercase__ ) __lowercase= 3_8_4 __lowercase= load_demo_image(image_size=lowercase__ , device='cpu' ) __lowercase= BertTokenizer.from_pretrained('bert-base-uncased' ) __lowercase= tokenizer(['a picture of'] ).input_ids __lowercase= hf_model.generate(lowercase__ , lowercase__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] __lowercase= hf_model.generate(lowercase__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] if 
pytorch_dump_folder_path is not None: hf_model.save_pretrained(lowercase__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' __lowercase= ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) __lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' ) vqa_model.eval() __lowercase= vqa_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value __lowercase= BlipForQuestionAnswering(lowercase__ ) hf_vqa_model.load_state_dict(lowercase__ ) __lowercase= ['How many dogs are in this image?'] __lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids __lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' __lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' ) itm_model.eval() __lowercase= itm_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value __lowercase= BlipForImageTextRetrieval(lowercase__ ) __lowercase= ['A picture of a woman with a dog sitting in a beach'] __lowercase= tokenizer( lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids hf_itm_model.load_state_dict(lowercase__ ) hf_itm_model.eval() __lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ ) __lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the original BLIP checkpoint to convert.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowerCAmelCase = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
304
1
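The partition-rule cell in this row matches a tuple of parameter-path keys against a shorter tuple of regex patterns at every offset, which is how one rule like ("attention", "out_proj", "kernel") can apply to every layer. A standalone sketch of that matching idea using only stdlib re; the sample key path is hypothetical:

import re

def match_path(patterns: tuple, keys: tuple) -> bool:
    # A rule matches if its patterns align, in order, with any
    # contiguous window of the key path (full-string matches).
    compiled = tuple(re.compile(p + "$") for p in patterns)
    for i in range(len(keys) - len(compiled) + 1):
        window = keys[i : i + len(compiled)]
        if all(rx.match(k) for rx, k in zip(compiled, window)):
            return True
    return False

print(match_path(("attention", "out_proj", "kernel"),
                 ("transformer", "h", "0", "attention", "out_proj", "kernel")))  # True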
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase = logging.get_logger(__name__) class A ( A_ , A_ ): UpperCamelCase_ : List[str] ='''maskformer-swin''' UpperCamelCase_ : List[str] ={ '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__(self , lowerCAmelCase=2_2_4 , lowerCAmelCase=4 , lowerCAmelCase=3 , lowerCAmelCase=9_6 , lowerCAmelCase=[2, 2, 6, 2] , lowerCAmelCase=[3, 6, 1_2, 2_4] , lowerCAmelCase=7 , lowerCAmelCase=4.0 , lowerCAmelCase=True , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase="gelu" , lowerCAmelCase=False , lowerCAmelCase=0.02 , lowerCAmelCase=1E-5 , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): super().__init__(**lowerCAmelCase ) __lowercase= image_size __lowercase= patch_size __lowercase= num_channels __lowercase= embed_dim __lowercase= depths __lowercase= len(lowerCAmelCase ) __lowercase= num_heads __lowercase= window_size __lowercase= mlp_ratio __lowercase= qkv_bias __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= drop_path_rate __lowercase= hidden_act __lowercase= use_absolute_embeddings __lowercase= layer_norm_eps __lowercase= initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __lowercase= int(embed_dim * 2 ** (len(lowerCAmelCase ) - 1) ) __lowercase= ['stem'] + [f'stage{idx}' for idx in range(1 , len(lowerCAmelCase ) + 1 )] __lowercase, __lowercase= get_aligned_output_features_output_indices( out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names )
304
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1) lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2) @dataclass class A : UpperCamelCase_ : int UpperCamelCase_ : Node | None class A : def __init__(self , lowerCAmelCase ): __lowercase= None for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ): __lowercase= Node(lowerCAmelCase , self.head ) def __iter__(self ): __lowercase= self.head while node: yield node.data __lowercase= node.next_node def __len__(self ): return sum(1 for _ in self ) def __str__(self ): return " -> ".join([str(lowerCAmelCase ) for node in self] ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList: '''simple docstring''' return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
304
1
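The linked-list cell above merges two sorted lists by concatenating them and re-sorting during construction, which costs O((m+n) log(m+n)). A linear-time two-pointer alternative over plain Python lists, shown for contrast (this is not the cell's own approach):

def merge_sorted(a: list, b: list) -> list:
    # Classic two-pointer merge: O(len(a) + len(b)) instead of re-sorting.
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i]); i += 1
        else:
            out.append(b[j]); j += 1
    out.extend(a[i:])
    out.extend(b[j:])
    return out

print(merge_sorted([-11, -1, 0, 1, 3, 5, 7, 9], [-2, 0, 2, 3, 4, 6, 8, 10]))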
def _lowerCamelCase( lowercase__ , lowercase__ ) -> bool: '''simple docstring''' __lowercase= len(lowercase__ ) __lowercase= [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __lowercase= True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): __lowercase= False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: __lowercase= subset[i - 1][j] if arr[i - 1] <= j: __lowercase= subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
304
from __future__ import annotations from collections.abc import Callable def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float: '''simple docstring''' __lowercase= x_start __lowercase= fnc(lowercase__ ) __lowercase= 0.0 for _ in range(lowercase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area __lowercase= (x_end - x_start) / steps + xa __lowercase= fnc(lowercase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step __lowercase= xa __lowercase= fxa return area if __name__ == "__main__": def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' return x**3 + x**2 print('''f(x) = x^3 + x^2''') print('''The area between the curve, x = -5, x = 5 and the x axis is:''') lowerCAmelCase = 1_0 while i <= 1_0_0_0_0_0: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 1_0
304
1
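The subset-sum cell in this row fills an (n+1) x (sum+1) boolean table. The same recurrence also runs in O(sum) space with a single row updated right-to-left so each item is used at most once; a sketch of that common refinement, not taken from the cell:

def is_sum_subset(arr: list, required_sum: int) -> bool:
    # dp[j] is True iff some subset of the items seen so far sums to j.
    dp = [False] * (required_sum + 1)
    dp[0] = True  # the empty subset always sums to 0
    for value in arr:
        # Iterate right-to-left so each item is counted at most once.
        for j in range(required_sum, value - 1, -1):
            dp[j] = dp[j] or dp[j - value]
    return dp[required_sum]

print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True (4 + 5)
print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False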
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase = { '''configuration_blenderbot_small''': [ '''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotSmallConfig''', '''BlenderbotSmallOnnxConfig''', ], '''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['''BlenderbotSmallTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotSmallForCausalLM''', '''BlenderbotSmallForConditionalGeneration''', '''BlenderbotSmallModel''', '''BlenderbotSmallPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''TFBlenderbotSmallForConditionalGeneration''', '''TFBlenderbotSmallModel''', '''TFBlenderbotSmallPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''FlaxBlenderbotSmallForConditionalGeneration''', '''FlaxBlenderbotSmallModel''', '''FlaxBlenderbotSmallPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
304
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_lengths __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= gelu_activation __lowercase= sinusoidal_embeddings __lowercase= causal __lowercase= asm __lowercase= n_langs __lowercase= vocab_size __lowercase= n_special __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= summary_type __lowercase= use_proj __lowercase= scope __lowercase= bos_token_id def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None if self.use_input_lengths: __lowercase= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , 2 ).float() __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A (self ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) __lowercase= outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , ) ((__lowercase), )= result_with_labels.to_tuple() __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) ((__lowercase), )= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase 
) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_labels __lowercase= XLMForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_choices __lowercase= XLMForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : int =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase_ : Dict =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCamelCase_ : str =( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= XLMModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= min_length + idx + 1 __lowercase= ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , ) pass @slow def _A (self ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= 
XLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president __lowercase= [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
304
1
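The blenderbot-small cell registers its submodules in _import_structure and defers all loading through _LazyModule. A toy illustration of the underlying idea using PEP 562 module-level __getattr__, with stdlib modules standing in for the real submodules (illustrative only, not the transformers implementation):

import importlib

_import_structure = {
    "math": ["sqrt"],
    "json": ["dumps"],
}

# Invert the table: attribute name -> owning module.
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the owning module only on first attribute access.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(name)
    return getattr(importlib.import_module(module_name), name)

Importing this module and touching its `dumps` attribute would trigger `import json` lazily, which is the effect _LazyModule provides at package scale.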
from copy import deepcopy class A : def __init__(self , lowerCAmelCase = None , lowerCAmelCase = None ): if arr is None and size is not None: __lowercase= size __lowercase= [0] * size elif arr is not None: self.init(lowerCAmelCase ) else: raise ValueError('Either arr or size must be specified' ) def _A (self , lowerCAmelCase ): __lowercase= len(lowerCAmelCase ) __lowercase= deepcopy(lowerCAmelCase ) for i in range(1 , self.size ): __lowercase= self.next_(lowerCAmelCase ) if j < self.size: self.tree[j] += self.tree[i] def _A (self ): __lowercase= self.tree[:] for i in range(self.size - 1 , 0 , -1 ): __lowercase= self.next_(lowerCAmelCase ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def _A (lowerCAmelCase ): return index + (index & (-index)) @staticmethod def _A (lowerCAmelCase ): return index - (index & (-index)) def _A (self , lowerCAmelCase , lowerCAmelCase ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value __lowercase= self.next_(lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase ): self.add(lowerCAmelCase , value - self.get(lowerCAmelCase ) ) def _A (self , lowerCAmelCase ): if right == 0: return 0 __lowercase= self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] __lowercase= self.prev(lowerCAmelCase ) return result def _A (self , lowerCAmelCase , lowerCAmelCase ): return self.prefix(lowerCAmelCase ) - self.prefix(lowerCAmelCase ) def _A (self , lowerCAmelCase ): return self.query(lowerCAmelCase , index + 1 ) def _A (self , lowerCAmelCase ): value -= self.tree[0] if value < 0: return -1 __lowercase= 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 __lowercase= 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
304
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase = {'''UserAgent''': UserAgent().random} def _lowerCamelCase( lowercase__ ) -> dict: '''simple docstring''' __lowercase= script.contents[0] __lowercase= json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A : def __init__(self , lowerCAmelCase ): __lowercase= f'https://www.instagram.com/{username}/' __lowercase= self.get_json() def _A (self ): __lowercase= requests.get(self.url , headers=lowerCAmelCase ).text __lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__(self ): return f'{self.__class__.__name__}(\'{self.username}\')' def __str__(self ): return f'{self.fullname} ({self.username}) is {self.biography}' @property def _A (self ): return self.user_data["username"] @property def _A (self ): return self.user_data["full_name"] @property def _A (self ): return self.user_data["biography"] @property def _A (self ): return self.user_data["business_email"] @property def _A (self ): return self.user_data["external_url"] @property def _A (self ): return self.user_data["edge_followed_by"]["count"] @property def _A (self ): return self.user_data["edge_follow"]["count"] @property def _A (self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _A (self ): return self.user_data["profile_pic_url_hd"] @property def _A (self ): return self.user_data["is_verified"] @property def _A (self ): return self.user_data["is_private"] def _lowerCamelCase( lowercase__ = "github" ) -> None: '''simple docstring''' import os if os.environ.get('CI' ): return # test failing on GitHub Actions __lowercase= InstagramUser(lowercase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowercase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase = InstagramUser('''github''') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
304
1
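The Fenwick-tree cell above supports point updates and prefix queries in O(log n) via the lowbit step `i & (-i)` (its next_/prev helpers). A short independent sketch of the same two operations, not the cell's class:

class Fenwick:
    def __init__(self, size: int) -> None:
        self.tree = [0] * (size + 1)  # 1-indexed internally

    def add(self, i: int, value: int) -> None:
        i += 1
        while i < len(self.tree):
            self.tree[i] += value
            i += i & (-i)  # climb to the next node covering index i

    def prefix(self, i: int) -> int:
        # Sum of elements in [0, i)
        total = 0
        while i > 0:
            total += self.tree[i]
            i -= i & (-i)  # drop the lowest set bit
        return total

fw = Fenwick(8)
for idx, v in enumerate([5, 2, 9, -3, 7, 1, 0, 4]):
    fw.add(idx, v)
print(fw.prefix(4))  # 5 + 2 + 9 - 3 = 13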
from abc import ABC, abstractmethod from argparse import ArgumentParser class A ( A_ ): @staticmethod @abstractmethod def _A (lowerCAmelCase ): raise NotImplementedError() @abstractmethod def _A (self ): raise NotImplementedError()
304
from typing import Any import numpy as np def _lowerCamelCase( lowercase__ ) -> bool: '''simple docstring''' return np.array_equal(lowercase__ , matrix.conjugate().T ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any: '''simple docstring''' __lowercase= v.conjugate().T __lowercase= v_star.dot(lowercase__ ) assert isinstance(lowercase__ , np.ndarray ) return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ )) def _lowerCamelCase( ) -> None: '''simple docstring''' __lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) __lowercase= np.array([[1], [2], [3]] ) assert is_hermitian(lowercase__ ), F'{a} is not hermitian.' print(rayleigh_quotient(lowercase__ , lowercase__ ) ) __lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(lowercase__ ), F'{a} is not hermitian.' assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
304
1
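The Rayleigh-quotient cell computes v*Av / v*v and asserts the matrix is Hermitian, which guarantees a real-valued quotient. A compact numpy restatement along the same lines as the cell (a sketch, with the Hermitian check inlined):

import numpy as np

def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> complex:
    # R(A, v) = (v* A v) / (v* v); real-valued whenever A is Hermitian.
    assert np.array_equal(a, a.conjugate().T), "matrix must be Hermitian"
    v_star = v.conjugate().T
    return (v_star @ a @ v).item() / (v_star @ v).item()

a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
print(rayleigh_quotient(a, v))  # (55 + 0j) / 14 ≈ 3.93, imaginary part 0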
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class A ( unittest.TestCase ): def _A (self ): __lowercase= inspect.getfile(accelerate.test_utils ) __lowercase= os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] ) __lowercase= os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] ) __lowercase= os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] ) @require_multi_gpu def _A (self ): print(f'Found {torch.cuda.device_count()} devices.' ) __lowercase= ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _A (self ): print(f'Found {torch.cuda.device_count()} devices.' ) __lowercase= ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path] print(f'Command: {cmd}' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _A (self ): __lowercase= ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _A (self ): print(f'Found {torch.cuda.device_count()} devices, using 2 devices only' ) __lowercase= ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ): execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase = Accelerator() lowerCAmelCase = (accelerator.state.process_index + 2, 1_0) lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device) lowerCAmelCase = '''''' lowerCAmelCase = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
304
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask'''] def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ): super().__init__( feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= spectrogram_length __lowercase= num_channels __lowercase= patch_size __lowercase= feature_size // self.patch_size[1] __lowercase= n_fft __lowercase= sampling_rate // hop_length_to_sampling_rate __lowercase= sampling_rate __lowercase= padding_value __lowercase= mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T def _A (self , lowerCAmelCase ): __lowercase= spectrogram( lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , ) __lowercase= log_spec[:, :-1] __lowercase= log_spec - 20.0 __lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __lowercase= is_batched_numpy or ( isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ): __lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa ) elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowercase= raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowercase= [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __lowercase= [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , lowerCAmelCase ): __lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __lowercase= max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __lowercase= [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __lowercase= np.array(lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding __lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __lowercase= padded_audio_features * self.padding_value for i in range(len(lowerCAmelCase ) ): __lowercase= audio_features[i] __lowercase= feature # return as BatchFeature if return_attention_mask: __lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: __lowercase= {'audio_values': padded_audio_features} __lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase ) return encoded_inputs
304
1
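The feature-extractor cell in this row rescales log-mel values into [-1, 1] by shifting down 20 dB and clipping over an 80 dB window (`log_spec - 20.0`, then `clip(log_spec / 40.0, -2.0, 0.0) + 1.0`). The same normalization in isolation with numpy, shown as a sketch rather than the extractor itself:

import numpy as np

def normalize_db(log_spec: np.ndarray) -> np.ndarray:
    # Shift down 20 dB, then map the remaining [-80, 0] dB window to [-1, 1].
    shifted = log_spec - 20.0
    return np.clip(shifted / 40.0, -2.0, 0.0) + 1.0

db = np.array([0.0, -20.0, -60.0, -120.0])
print(normalize_db(db))  # [ 0.5  0.  -1.  -1. ]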
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase = logging.get_logger(__name__) # General docstring lowerCAmelCase = '''RegNetConfig''' # Base docstring lowerCAmelCase = '''facebook/regnet-y-040''' lowerCAmelCase = [1, 1_0_8_8, 7, 7] # Image classification docstring lowerCAmelCase = '''facebook/regnet-y-040''' lowerCAmelCase = '''tabby, tabby cat''' lowerCAmelCase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): def __init__(self , lowerCAmelCase , lowerCAmelCase = 3 , lowerCAmelCase = 1 , lowerCAmelCase = 1 , lowerCAmelCase = "relu" , **lowerCAmelCase , ): super().__init__(**lowerCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __lowercase= tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __lowercase= tf.keras.layers.ConvaD( filters=lowerCAmelCase , kernel_size=lowerCAmelCase , strides=lowerCAmelCase , padding='VALID' , groups=lowerCAmelCase , use_bias=lowerCAmelCase , name='convolution' , ) __lowercase= tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) __lowercase= ACTaFN[activation] if activation is not None else tf.identity def _A (self , lowerCAmelCase ): __lowercase= self.convolution(self.padding(lowerCAmelCase ) ) __lowercase= self.normalization(lowerCAmelCase ) __lowercase= self.activation(lowerCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__(self , lowerCAmelCase , **lowerCAmelCase ): super().__init__(**lowerCAmelCase ) __lowercase= config.num_channels __lowercase= TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def _A (self , lowerCAmelCase ): __lowercase= shape_list(lowerCAmelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __lowercase= tf.transpose(lowerCAmelCase , perm=(0, 2, 3, 1) ) __lowercase= self.embedder(lowerCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__(self , lowerCAmelCase , lowerCAmelCase = 2 , **lowerCAmelCase ): super().__init__(**lowerCAmelCase ) __lowercase= tf.keras.layers.ConvaD( filters=lowerCAmelCase , kernel_size=1 , strides=lowerCAmelCase , use_bias=lowerCAmelCase , name='convolution' ) __lowercase= tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def _A (self , lowerCAmelCase , lowerCAmelCase = False ): return self.normalization(self.convolution(lowerCAmelCase ) , training=lowerCAmelCase ) class A ( tf.keras.layers.Layer ): def __init__(self , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ): super().__init__(**lowerCAmelCase ) __lowercase= tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCAmelCase , name='pooler' ) __lowercase= [ tf.keras.layers.ConvaD(filters=lowerCAmelCase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=lowerCAmelCase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def _A (self , lowerCAmelCase ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __lowercase= self.pooler(lowerCAmelCase ) for layer_module in self.attention: __lowercase= layer_module(lowerCAmelCase ) __lowercase= hidden_state * pooled return hidden_state class A ( tf.keras.layers.Layer ): def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1 , **lowerCAmelCase ): super().__init__(**lowerCAmelCase ) __lowercase= in_channels != out_channels or stride != 1 __lowercase= max(1 , out_channels // config.groups_width ) __lowercase= ( TFRegNetShortCut(lowerCAmelCase , stride=lowerCAmelCase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__lowercase= [ TFRegNetConvLayer(lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase , name='layer.2' ), ] __lowercase= ACTaFN[config.hidden_act] def _A (self , lowerCAmelCase ): __lowercase= hidden_state for layer_module in self.layers: __lowercase= layer_module(lowerCAmelCase ) __lowercase= self.shortcut(lowerCAmelCase ) hidden_state += residual __lowercase= self.activation(lowerCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1 , **lowerCAmelCase ): super().__init__(**lowerCAmelCase ) __lowercase= in_channels != out_channels or stride != 1 __lowercase= max(1 , out_channels // config.groups_width ) __lowercase= ( TFRegNetShortCut(lowerCAmelCase , stride=lowerCAmelCase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) __lowercase= [ TFRegNetConvLayer(lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase , name='layer.3' ), ] __lowercase= ACTaFN[config.hidden_act] def _A (self , lowerCAmelCase ): __lowercase= hidden_state for layer_module in self.layers: __lowercase= layer_module(lowerCAmelCase ) __lowercase= self.shortcut(lowerCAmelCase ) hidden_state += residual __lowercase= self.activation(lowerCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 2 , lowerCAmelCase = 2 , **lowerCAmelCase ): super().__init__(**lowerCAmelCase ) __lowercase= TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer __lowercase= [ # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , name='layers.0' ), *[layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , name=f'layers.{i+1}' ) for i in range(depth - 1 )], ] def _A (self , lowerCAmelCase ): for layer_module in self.layers: __lowercase= layer_module(lowerCAmelCase ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__(self , lowerCAmelCase , **lowerCAmelCase ): super().__init__(**lowerCAmelCase ) __lowercase= [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) __lowercase= zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCAmelCase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , depth=lowerCAmelCase , name=f'stages.{i+1}' ) ) def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ): __lowercase= () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase= 
hidden_states + (hidden_state,) __lowercase= stage_module(lowerCAmelCase ) if output_hidden_states: __lowercase= hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase , hidden_states=lowerCAmelCase ) @keras_serializable class A ( tf.keras.layers.Layer ): UpperCamelCase_ : Union[str, Any] =RegNetConfig def __init__(self , lowerCAmelCase , **lowerCAmelCase ): super().__init__(**lowerCAmelCase ) __lowercase= config __lowercase= TFRegNetEmbeddings(lowerCAmelCase , name='embedder' ) __lowercase= TFRegNetEncoder(lowerCAmelCase , name='encoder' ) __lowercase= tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCAmelCase , name='pooler' ) @unpack_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , ): __lowercase= ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase= return_dict if return_dict is not None else self.config.use_return_dict __lowercase= self.embedder(lowerCAmelCase , training=lowerCAmelCase ) __lowercase= self.encoder( lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase , training=lowerCAmelCase ) __lowercase= encoder_outputs[0] __lowercase= self.pooler(lowerCAmelCase ) # Change to NCHW output format have uniformity in the modules __lowercase= tf.transpose(lowerCAmelCase , perm=(0, 3, 1, 2) ) __lowercase= tf.transpose(lowerCAmelCase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __lowercase= tuple([tf.transpose(lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase , pooler_output=lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class A ( A_ ): UpperCamelCase_ : Optional[Any] =RegNetConfig UpperCamelCase_ : Optional[int] ='''regnet''' UpperCamelCase_ : Union[str, Any] ='''pixel_values''' @property def _A (self ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} lowerCAmelCase = R''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCAmelCase = R''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , A_ , ) class A ( A_ ): def __init__(self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ) __lowercase= TFRegNetMainLayer(lowerCAmelCase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase=False , ): __lowercase= ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase= return_dict if return_dict is not None else self.config.use_return_dict __lowercase= self.regnet( pixel_values=lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase , training=lowerCAmelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , A_ , ) class A ( A_ , A_ ): def __init__(self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ) __lowercase= config.num_labels __lowercase= TFRegNetMainLayer(lowerCAmelCase , name='regnet' ) # classification head __lowercase= [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _A (self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase=False , ): __lowercase= ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase= return_dict if return_dict is not None else self.config.use_return_dict __lowercase= self.regnet( lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase , training=lowerCAmelCase ) __lowercase= outputs.pooler_output if return_dict else outputs[1] __lowercase= self.classifier[0](lowerCAmelCase ) __lowercase= self.classifier[1](lowerCAmelCase ) __lowercase= None if labels is None else self.hf_compute_loss(labels=lowerCAmelCase , logits=lowerCAmelCase ) if not return_dict: __lowercase= (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowerCAmelCase , logits=lowerCAmelCase , hidden_states=outputs.hidden_states )
304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' __lowercase= create_tensor(lowercase__ ) __lowercase= gather(lowercase__ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' __lowercase= [state.process_index] __lowercase= gather_object(lowercase__ ) assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}' assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}' def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' __lowercase= create_tensor(lowercase__ ) __lowercase= broadcast(lowercase__ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def _lowerCamelCase( lowercase__ ) -> List[Any]: '''simple docstring''' if state.is_main_process: __lowercase= torch.arange(state.num_processes + 1 ).to(state.device ) else: __lowercase= torch.arange(state.num_processes ).to(state.device ) __lowercase= pad_across_processes(lowercase__ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def _lowerCamelCase( lowercase__ ) -> Any: '''simple docstring''' if state.num_processes != 2: return __lowercase= create_tensor(lowercase__ ) __lowercase= reduce(lowercase__ , 'sum' ) __lowercase= torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}' def _lowerCamelCase( lowercase__ ) -> Union[str, Any]: '''simple docstring''' if state.num_processes != 2: return __lowercase= create_tensor(lowercase__ ) __lowercase= reduce(lowercase__ , 'mean' ) __lowercase= torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}' def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' main() def _lowerCamelCase( ) -> List[str]: '''simple docstring''' __lowercase= PartialState() state.print(F'State: {state}' ) state.print('testing gather' ) test_gather(lowercase__ ) state.print('testing gather_object' ) test_gather_object(lowercase__ ) state.print('testing broadcast' ) test_broadcast(lowercase__ ) state.print('testing pad_across_processes' ) test_pad_across_processes(lowercase__ ) state.print('testing reduce_sum' ) test_reduce_sum(lowercase__ ) state.print('testing reduce_mean' ) 
test_reduce_mean(lowercase__ ) if __name__ == "__main__": main()
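A minimal standalone sketch of the gather operation exercised above; it assumes the same public accelerate API and is meant to be launched across processes (e.g. with `accelerate launch --num_processes 2`).

import torch
from accelerate import PartialState
from accelerate.utils.operations import gather

state = PartialState()
# Each process builds a distinct slice, mirroring create_tensor() above.
local = (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
gathered = gather(local)  # every process receives the concatenation of all slices
state.print(f"gathered: {gathered.tolist()}")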
304
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } lowerCAmelCase = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' for attribute in key.split('.' ): __lowercase= getattr(lowercase__ , lowercase__ ) if weight_type is not None: __lowercase= getattr(lowercase__ , lowercase__ ).shape else: __lowercase= hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": __lowercase= value elif weight_type == "weight_g": __lowercase= value elif weight_type == "weight_v": __lowercase= value elif weight_type == "bias": __lowercase= value elif weight_type == "running_mean": __lowercase= value elif weight_type == "running_var": __lowercase= value elif weight_type == "num_batches_tracked": __lowercase= value elif weight_type == "inv_freq": __lowercase= value else: __lowercase= value logger.info(F'{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= [] __lowercase= fairseq_model.state_dict() __lowercase= hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __lowercase= False if "conv_layers" in name: load_conv_layer( lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowercase= True else: for key, mapped_key in MAPPING.items(): __lowercase= 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __lowercase= True if "*" in mapped_key: __lowercase= name.split(lowercase__ )[0].split('.' )[-2] __lowercase= mapped_key.replace('*' , lowercase__ ) if "pos_bias_u" in name: __lowercase= None elif "pos_bias_v" in name: __lowercase= None elif "weight_g" in name: __lowercase= 'weight_g' elif "weight_v" in name: __lowercase= 'weight_v' elif "bias" in name: __lowercase= 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowercase= 'weight' elif "running_mean" in name: __lowercase= 'running_mean' elif "inv_freq" in name: __lowercase= 'inv_freq' elif "running_var" in name: __lowercase= 'running_var' elif "num_batches_tracked" in name: __lowercase= 'num_batches_tracked' else: __lowercase= None set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) continue if not is_used: unused_weights.append(lowercase__ ) logger.warning(F'Unused weights: {unused_weights}' ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> str: '''simple docstring''' __lowercase= full_name.split('conv_layers.' )[-1] __lowercase= name.split('.' ) __lowercase= int(items[0] ) __lowercase= int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __lowercase= value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __lowercase= value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) __lowercase= value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) __lowercase= value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowercase__ ) @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True ) -> Tuple: '''simple docstring''' if config_path is not None: __lowercase= WavaVecaConformerConfig.from_pretrained(lowercase__ , hidden_act='swish' ) else: __lowercase= WavaVecaConformerConfig() if "rope" in checkpoint_path: __lowercase= 'rotary' if is_finetuned: if dict_path: __lowercase= Dictionary.load(lowercase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase= target_dict.pad_index __lowercase= target_dict.bos_index __lowercase= target_dict.eos_index __lowercase= len(target_dict.symbols ) __lowercase= os.path.join(lowercase__ , 'vocab.json' ) if not os.path.isdir(lowercase__ ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase__ ) ) return os.makedirs(lowercase__ , exist_ok=lowercase__ ) __lowercase= target_dict.indices # fairseq has the <pad> and <s> switched __lowercase= 0 __lowercase= 1 with open(lowercase__ , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(lowercase__ , lowercase__ ) __lowercase= WavaVecaCTCTokenizer( lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase__ , ) __lowercase= True if config.feat_extract_norm == 'layer' else False __lowercase= WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) __lowercase= WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ ) processor.save_pretrained(lowercase__ ) __lowercase= WavaVecaConformerForCTC(lowercase__ ) else: __lowercase= WavaVecaConformerForPreTraining(lowercase__ ) if is_finetuned: __lowercase, __lowercase, __lowercase= fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: __lowercase= argparse.Namespace(task='audio_pretraining' ) __lowercase= fairseq.tasks.setup_task(lowercase__ ) __lowercase, __lowercase, __lowercase= fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase__ ) __lowercase= model[0].eval() recursively_load_weights(lowercase__ , lowercase__ , not is_finetuned ) hf_wavavec.save_pretrained(lowercase__ ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCAmelCase = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
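An illustrative programmatic call to the conversion entry point defined above; the arguments are passed positionally to match the `__main__` invocation, and all paths are placeholders.

convert_wavaveca_conformer_checkpoint(
    "/path/to/fairseq/checkpoint.pt",  # checkpoint_path (placeholder)
    "./wav2vec2-conformer-hf",         # pytorch_dump_folder_path (placeholder)
    None,                              # config_path -> default WavaVecaConformerConfig
    None,                              # dict_path (only needed for fine-tuned models)
    False,                             # is_finetuned; CLI equivalent of passing --not_finetuned
)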
304
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class A ( A_ ): UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor class A ( A_ , A_ ): UpperCamelCase_ : Dict =1 @register_to_config def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ): # standard deviation of the initial noise distribution __lowercase= sigma_max # setable values __lowercase= None self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): return sample def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps __lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sigma_min if sigma_min is not None else self.config.sigma_min __lowercase= sigma_max if sigma_max is not None else self.config.sigma_max __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCAmelCase , lowerCAmelCase ) __lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) ) __lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) __lowercase= timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) __lowercase= (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __lowercase= timesteps.to(self.discrete_sigmas.device ) __lowercase= self.discrete_sigmas[timesteps].to(sample.device ) __lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device ) __lowercase= torch.zeros_like(lowerCAmelCase ) __lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __lowercase= diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): __lowercase= diffusion.unsqueeze(-1 ) __lowercase= drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __lowercase= randn_tensor( sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype ) __lowercase= sample - drift # subtract 
because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction __lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr __lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __lowercase= step_size.flatten() while len(step_size.shape ) < len(sample.shape ): __lowercase= step_size.unsqueeze(-1 ) __lowercase= sample + step_size * model_output __lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowercase= timesteps.to(original_samples.device ) __lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps] __lowercase= ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None] ) __lowercase= noise + original_samples return noisy_samples def __len__(self ): return self.config.num_train_timesteps
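For orientation, a minimal predictor-corrector sampling loop; it assumes the un-mangled diffusers `ScoreSdeVeScheduler` names (`set_timesteps`, `set_sigmas`, `step_correct`, `step_pred`) that the `_A` methods above stand for, and substitutes a toy score function for a trained network.

import torch
from diffusers import ScoreSdeVeScheduler

def score_model(x, t):
    return -x  # toy stand-in for a trained score network

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=100)
scheduler.set_sigmas(num_inference_steps=100)

sample = torch.randn(1, 3, 32, 32) * scheduler.config.sigma_max
for t in scheduler.timesteps:
    # corrector step (annealed Langevin dynamics) ...
    sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
    # ... then predictor step (reverse-time SDE discretization)
    out = scheduler.step_pred(score_model(sample, t), t, sample)
    sample = out.prev_sample  # SdeVeOutput, as defined above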
304
1
import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class A : def __init__(self , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=6_4 , lowerCAmelCase=None ): __lowercase= np.random.default_rng(lowerCAmelCase ) __lowercase= length __lowercase= rng.normal(size=(length,) ).astype(np.floataa ) __lowercase= a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__(self ): return self.length def __getitem__(self , lowerCAmelCase ): return {"x": self.x[i], "y": self.y[i]} class A ( torch.nn.Module ): def __init__(self , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=False ): super().__init__() __lowercase= torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __lowercase= torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __lowercase= True def _A (self , lowerCAmelCase=None ): if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __lowercase= False return x * self.a[0] + self.b[0] class A ( torch.nn.Module ): def __init__(self , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=False ): super().__init__() __lowercase= torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() ) __lowercase= torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() ) __lowercase= True def _A (self , lowerCAmelCase=None ): if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __lowercase= False return x * self.a + self.b def _lowerCamelCase( lowercase__ , lowercase__ = 1_6 ) -> str: '''simple docstring''' from datasets import load_dataset from transformers import AutoTokenizer __lowercase= AutoTokenizer.from_pretrained('bert-base-cased' ) __lowercase= {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'} __lowercase= load_dataset('csv' , data_files=lowercase__ ) __lowercase= datasets['train'].unique('label' ) __lowercase= {v: i for i, v in enumerate(lowercase__ )} def tokenize_function(lowercase__ ): # max_length=None => use the model max length (it's actually the default) __lowercase= tokenizer( examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) if "label" in examples: __lowercase= [label_to_id[l] for l in examples['label']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __lowercase= datasets.map( lowercase__ , batched=lowercase__ , remove_columns=['sentence1', 'sentence2', 'label'] , ) def collate_fn(lowercase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' ) return tokenizer.pad(lowercase__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. __lowercase= DataLoader(tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=2 ) __lowercase= DataLoader(tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=1 ) return train_dataloader, eval_dataloader
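A minimal sketch of how these helpers compose; it assumes the un-mangled names `RegressionDataset` and `RegressionModel` that the classes above (all renamed `A`) stand for.

import torch
from torch.utils.data import DataLoader

dataset = RegressionDataset(a=2, b=3, length=64, seed=0)  # y ~= 2x + 3 plus noise
loader = DataLoader(dataset, batch_size=16, shuffle=True)

model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for batch in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
    loss.backward()
    optimizer.step()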
304
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCAmelCase = False class A ( unittest.TestCase ): pass @nightly @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase ) __lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= generator.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 'cyberpunk 2077' __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= 'A painting of a squirrel eating a burger ' __lowercase= torch.manual_seed(0 ) __lowercase= pipe.text_to_image( prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
304
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class A ( A_ ): UpperCamelCase_ : Dict ='''deformable_detr''' UpperCamelCase_ : Optional[Any] ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__(self , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=3 , lowerCAmelCase=3_0_0 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=6 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=8 , lowerCAmelCase=6 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=8 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=2_5_6 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1.0 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase="sine" , lowerCAmelCase="resnet50" , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=False , lowerCAmelCase=3_0_0 , lowerCAmelCase=False , lowerCAmelCase=1 , lowerCAmelCase=5 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=1 , lowerCAmelCase=5 , lowerCAmelCase=2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.25 , lowerCAmelCase=False , **lowerCAmelCase , ): if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) __lowercase= CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(lowerCAmelCase , lowerCAmelCase ): __lowercase= backbone_config.get('model_type' ) __lowercase= CONFIG_MAPPING[backbone_model_type] __lowercase= config_class.from_dict(lowerCAmelCase ) __lowercase= use_timm_backbone __lowercase= backbone_config __lowercase= num_channels __lowercase= num_queries __lowercase= max_position_embeddings __lowercase= d_model __lowercase= encoder_ffn_dim __lowercase= encoder_layers __lowercase= encoder_attention_heads __lowercase= decoder_ffn_dim __lowercase= decoder_layers __lowercase= decoder_attention_heads __lowercase= dropout __lowercase= attention_dropout __lowercase= activation_dropout __lowercase= activation_function __lowercase= init_std __lowercase= init_xavier_std __lowercase= encoder_layerdrop __lowercase= auxiliary_loss __lowercase= position_embedding_type __lowercase= backbone __lowercase= use_pretrained_backbone __lowercase= dilation # deformable attributes __lowercase= num_feature_levels __lowercase= encoder_n_points __lowercase= decoder_n_points __lowercase= two_stage __lowercase= two_stage_num_proposals __lowercase= with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' 
) # Hungarian matcher __lowercase= class_cost __lowercase= bbox_cost __lowercase= giou_cost # Loss coefficients __lowercase= mask_loss_coefficient __lowercase= dice_loss_coefficient __lowercase= bbox_loss_coefficient __lowercase= giou_loss_coefficient __lowercase= eos_coefficient __lowercase= focal_alpha __lowercase= disable_custom_kernels super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase ) @property def _A (self ): return self.encoder_attention_heads @property def _A (self ): return self.d_model def _A (self ): __lowercase= copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __lowercase= self.backbone_config.to_dict() __lowercase= self.__class__.model_type return output
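A short sketch of the config in use, assuming the un-mangled transformers name `DeformableDetrConfig` for the class above.

from transformers import DeformableDetrConfig

# two_stage=True requires with_box_refine=True, per the check above.
config = DeformableDetrConfig(two_stage=True, with_box_refine=True)

# attribute_map aliases resolve to the underlying fields:
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.encoder_attention_heads
print(config.to_dict()["model_type"])  # "deformable_detr"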
304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
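The `_LazyModule` indirection above defers heavy imports until a name is first accessed. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); this illustrates the mechanism, not the actual `_LazyModule` implementation.

# mypkg/__init__.py -- hypothetical package illustrating lazy loading
import importlib

_import_structure = {
    "configuration_xmod": ["XmodConfig"],  # submodule -> public names
    "modeling_xmod": ["XmodModel"],
}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    # Import the owning submodule only on first access to one of its names.
    if name in _name_to_module:
        module = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")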
304
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
304
import math from datetime import datetime, timedelta def _lowerCamelCase( lowercase__ ) -> datetime: '''simple docstring''' __lowercase= year % 1_9 __lowercase= year % 4 __lowercase= year % 7 __lowercase= math.floor(year / 1_0_0 ) __lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 ) __lowercase= leap_day_inhibits / 4 __lowercase= ( 1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 3_0 __lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 __lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0 # PHM -> Paschal Full Moon __lowercase= ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 2_9 and days_from_phm_to_sunday == 6: return datetime(lowercase__ , 4 , 1_9 ) elif days_to_add == 2_8 and days_from_phm_to_sunday == 6: return datetime(lowercase__ , 4 , 1_8 ) else: return datetime(lowercase__ , 3 , 2_2 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3): lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was''' print(F'Easter in {year} {tense} {gauss_easter(year)}')
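A worked pass through the algorithm for 2023, tracing each intermediate value:

# metonic_cycle             = 2023 % 19                  = 9
# julian_leap_year          = 2023 % 4                   = 3
# non_leap_year             = 2023 % 7                   = 0
# leap_day_inhibits         = floor(2023 / 100)          = 20
# lunar_orbit_correction    = floor((13 + 8*20) / 25)    = 6
# leap_day_reinstall_number = 20 / 4                     = 5.0
# secular_moon_shift        = (15 - 6 + 20 - 5) % 30     = 24
# century_starting_point    = (4 + 20 - 5) % 7           = 5
# days_to_add               = (19*9 + 24) % 30           = 15
# days_from_phm_to_sunday   = (2*3 + 4*0 + 6*15 + 5) % 7 = 3
# Neither special case fires, so Easter = March 22 + 18 days = April 9.
print(gauss_easter(2023))  # 2023-04-09 00:00:00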
304
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''', } class A ( A_ ): UpperCamelCase_ : Optional[Any] ='''nllb-moe''' UpperCamelCase_ : str =['''past_key_values'''] UpperCamelCase_ : Tuple ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__(self , lowerCAmelCase=1_2_8_1_1_2 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=1_2 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_6 , lowerCAmelCase=1_2 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_6 , lowerCAmelCase=0.05 , lowerCAmelCase=0.05 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase="float32" , lowerCAmelCase=False , lowerCAmelCase=1_2_8 , lowerCAmelCase=6_4 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=0.0_01 , lowerCAmelCase=0.0_01 , lowerCAmelCase="all" , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=1.0 , lowerCAmelCase=0.2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=False , **lowerCAmelCase , ): __lowercase= vocab_size __lowercase= max_position_embeddings __lowercase= d_model __lowercase= encoder_ffn_dim __lowercase= encoder_layers __lowercase= encoder_attention_heads __lowercase= decoder_ffn_dim __lowercase= decoder_layers __lowercase= decoder_attention_heads __lowercase= dropout __lowercase= attention_dropout __lowercase= activation_dropout __lowercase= activation_function __lowercase= init_std __lowercase= encoder_layerdrop __lowercase= decoder_layerdrop __lowercase= use_cache __lowercase= encoder_layers __lowercase= scale_embedding # scale factor will be sqrt(d_model) if True __lowercase= router_z_loss_coef __lowercase= router_aux_loss_coef __lowercase= decoder_sparse_step __lowercase= encoder_sparse_step __lowercase= num_experts __lowercase= expert_capacity __lowercase= router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' ) __lowercase= router_dtype __lowercase= router_ignore_padding_tokens __lowercase= batch_prioritized_routing __lowercase= second_expert_policy __lowercase= normalize_router_prob_before_dropping __lowercase= moe_eval_capacity_token_fraction __lowercase= moe_token_dropout __lowercase= output_router_logits super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
304
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''blenderbot-small''' UpperCamelCase_ : Optional[Any] =['''past_key_values'''] UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ): __lowercase= vocab_size __lowercase= max_position_embeddings __lowercase= d_model __lowercase= encoder_ffn_dim __lowercase= encoder_layers __lowercase= encoder_attention_heads __lowercase= decoder_ffn_dim __lowercase= decoder_layers __lowercase= decoder_attention_heads __lowercase= dropout __lowercase= attention_dropout __lowercase= activation_dropout __lowercase= activation_function __lowercase= init_std __lowercase= encoder_layerdrop __lowercase= decoder_layerdrop __lowercase= use_cache __lowercase= encoder_layers __lowercase= scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , ) class A ( A_ ): @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase= {0: 'batch'} __lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __lowercase= {0: 'batch', 1: 'decoder_sequence'} __lowercase= {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
__lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} else: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super().outputs else: __lowercase= super(lowerCAmelCase , self ).outputs if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Generate decoder inputs __lowercase= seq_length if not self.use_past else 1 __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __lowercase= dict(**lowerCAmelCase , **lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape __lowercase= common_inputs['decoder_input_ids'].shape[1] __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= decoder_seq_length + 3 __lowercase= ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase= torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 ) __lowercase= [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase, __lowercase= self.num_layers __lowercase= min(lowerCAmelCase , lowerCAmelCase ) __lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers __lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), ) ) # TODO: test this. 
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowerCAmelCase , lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowercase= seqlen + 2 __lowercase, __lowercase= self.num_layers __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= common_inputs['attention_mask'].dtype __lowercase= torch.cat( [common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 ) __lowercase= [ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase ) ] return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase ) __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: __lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) elif self.task == "causal-lm": __lowercase= self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) else: __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: __lowercase= 
super(lowerCAmelCase , self )._flatten_past_key_values_( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
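A sketch of the dummy-input machinery in use; the class name and import path are assumptions for the un-mangled `BlenderbotSmallOnnxConfig` this block corresponds to, and running it requires torch plus network access for the tokenizer.

from transformers import AutoTokenizer, BlenderbotSmallConfig
from transformers.file_utils import TensorType
# Assumed import path for the un-mangled ONNX config class defined above.
from transformers.models.blenderbot_small import BlenderbotSmallOnnxConfig

config = BlenderbotSmallConfig()
onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")  # checkpoint from the map above

dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy.keys()))  # input_ids, attention_mask, decoder_input_ids, decoder_attention_mask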
304
1
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCAmelCase = TypeVar('''T''') class A ( Generic[T] ): def __init__(self , lowerCAmelCase ): __lowercase= data __lowercase= None def __str__(self ): return f'{self.data}' class A ( Generic[T] ): def __init__(self ): __lowercase= None def __iter__(self ): __lowercase= self.top while node: yield node.data __lowercase= node.next def __str__(self ): return "->".join([str(lowerCAmelCase ) for item in self] ) def __len__(self ): return len(tuple(iter(self ) ) ) def _A (self ): return self.top is None def _A (self , lowerCAmelCase ): __lowercase= Node(lowerCAmelCase ) if not self.is_empty(): __lowercase= self.top __lowercase= node def _A (self ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , lowerCAmelCase ) __lowercase= self.top __lowercase= self.top.next return pop_node.data def _A (self ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def _A (self ): __lowercase= None if __name__ == "__main__": from doctest import testmod testmod()
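A short usage sketch, assuming the conventional names `Stack`, `push`, `pop`, `peek`, and `is_empty` that the mangled `A`/`_A` definitions above stand for.

stack = Stack()  # Stack[int]
assert stack.is_empty()
for value in (1, 2, 3):
    stack.push(value)
print(stack)             # 3->2->1 (iteration starts at the top, per __iter__ above)
assert stack.peek() == 3
assert stack.pop() == 3
assert len(stack) == 2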
304
from math import factorial, radians def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float: '''simple docstring''' __lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians __lowercase= radians(lowercase__ ) __lowercase= angle_in_radians __lowercase= 3 __lowercase= -1 for _ in range(lowercase__ ): result += (b * (angle_in_radians**a)) / factorial(lowercase__ ) __lowercase= -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(lowercase__ , lowercase__ ) if __name__ == "__main__": __import__('''doctest''').testmod()
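The loop above sums the Maclaurin series of sine: the result starts at the first-order term and then adds `accuracy` further terms with alternating signs,

\sin x = \sum_{n=0}^{\infty} \frac{(-1)^{n}}{(2n+1)!}\, x^{2n+1}
       = x - \frac{x^{3}}{3!} + \frac{x^{5}}{5!} - \cdots

so the approximation uses `accuracy + 1` terms in total before rounding.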
304
1
import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) set_seed(7_7_0) lowerCAmelCase = { '''c_attn''': '''att_proj''', '''c_proj''': '''out_proj''', '''c_fc''': '''in_proj''', '''transformer.''': '''''', '''h.''': '''layers.''', '''ln_1''': '''layernorm_1''', '''ln_2''': '''layernorm_2''', '''ln_f''': '''layernorm_final''', '''wpe''': '''position_embeds_layer''', '''wte''': '''input_embeds_layer''', } lowerCAmelCase = { '''text_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text.pt''', }, '''coarse_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse.pt''', }, '''fine_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine.pt''', }, '''text''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text_2.pt''', }, '''coarse''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse_2.pt''', }, '''fine''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine_2.pt''', }, } lowerCAmelCase = os.path.dirname(os.path.abspath(__file__)) lowerCAmelCase = os.path.join(os.path.expanduser('''~'''), '''.cache''') lowerCAmelCase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def _lowerCamelCase( lowercase__ , lowercase__=False ) -> List[Any]: '''simple docstring''' __lowercase= model_type if use_small: key += "_small" return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]['file_name'] ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any: '''simple docstring''' os.makedirs(lowercase__ , exist_ok=lowercase__ ) hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ) -> Any: '''simple docstring''' if model_type == "text": __lowercase= BarkSemanticModel __lowercase= BarkSemanticConfig __lowercase= BarkSemanticGenerationConfig elif model_type == "coarse": __lowercase= BarkCoarseModel __lowercase= BarkCoarseConfig __lowercase= BarkCoarseGenerationConfig elif model_type == "fine": __lowercase= BarkFineModel __lowercase= BarkFineConfig __lowercase= BarkFineGenerationConfig else: raise NotImplementedError() __lowercase= F'{model_type}_small' if use_small else model_type __lowercase= REMOTE_MODEL_PATHS[model_key] if not os.path.exists(lowercase__ ): logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' 
) _download(model_info['repo_id'] , model_info['file_name'] ) __lowercase= torch.load(lowercase__ , map_location=lowercase__ ) # this is a hack __lowercase= checkpoint['model_args'] if "input_vocab_size" not in model_args: __lowercase= model_args['vocab_size'] __lowercase= model_args['vocab_size'] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments __lowercase= model_args.pop('n_head' ) __lowercase= model_args.pop('n_embd' ) __lowercase= model_args.pop('n_layer' ) __lowercase= ConfigClass(**checkpoint['model_args'] ) __lowercase= ModelClass(config=lowercase__ ) __lowercase= GenerationConfigClass() __lowercase= model_generation_config __lowercase= checkpoint['model'] # fixup checkpoint __lowercase= '_orig_mod.' for k, v in list(state_dict.items() ): if k.startswith(lowercase__ ): # replace part of the key with corresponding layer name in HF implementation __lowercase= k[len(lowercase__ ) :] for old_layer_name in new_layer_name_dict: __lowercase= new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] ) __lowercase= state_dict.pop(lowercase__ ) __lowercase= set(state_dict.keys() ) - set(model.state_dict().keys() ) __lowercase= {k for k in extra_keys if not k.endswith('.attn.bias' )} __lowercase= set(model.state_dict().keys() ) - set(state_dict.keys() ) __lowercase= {k for k in missing_keys if not k.endswith('.attn.bias' )} if len(lowercase__ ) != 0: raise ValueError(F'extra keys found: {extra_keys}' ) if len(lowercase__ ) != 0: raise ValueError(F'missing keys: {missing_keys}' ) model.load_state_dict(lowercase__ , strict=lowercase__ ) __lowercase= model.num_parameters(exclude_embeddings=lowercase__ ) __lowercase= checkpoint['best_val_loss'].item() logger.info(F'model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss' ) model.eval() model.to(lowercase__ ) del checkpoint, state_dict return model def _lowerCamelCase( lowercase__ , lowercase__=False , lowercase__="text" ) -> str: '''simple docstring''' if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() __lowercase= 'cpu' # do conversion on cpu __lowercase= _get_ckpt_path(lowercase__ , use_small=lowercase__ ) __lowercase= _load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ ) # load bark initial model __lowercase= _bark_load_model(lowercase__ , 'cpu' , model_type=lowercase__ , use_small=lowercase__ ) if model_type == "text": __lowercase= bark_model['model'] if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params(): raise ValueError('initial and new models don\'t have the same number of parameters' ) # check if same output as the bark model __lowercase= 5 __lowercase= 1_0 if model_type in ["text", "coarse"]: __lowercase= torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int ) __lowercase= bark_model(lowercase__ )[0] __lowercase= model(lowercase__ ) # take last logits __lowercase= output_new_model_total.logits[:, [-1], :] else: __lowercase= 3 __lowercase= 8 __lowercase= torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) __lowercase= model(lowercase__ , lowercase__ ) __lowercase= bark_model(lowercase__ , lowercase__ ) __lowercase= output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('initial and new outputs don\'t have the same shape' ) if (output_new_model - output_old_model).abs().max().item() > 1E-3: 
raise ValueError('initial and new outputs are not equal' ) Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) model.save_pretrained(lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Any: '''simple docstring''' __lowercase= os.path.join(lowercase__ , lowercase__ ) __lowercase= BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) ) __lowercase= BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) ) __lowercase= BarkFineConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) ) __lowercase= EncodecConfig.from_pretrained('facebook/encodec_24khz' ) __lowercase= BarkSemanticModel.from_pretrained(lowercase__ ) __lowercase= BarkCoarseModel.from_pretrained(lowercase__ ) __lowercase= BarkFineModel.from_pretrained(lowercase__ ) __lowercase= EncodecModel.from_pretrained('facebook/encodec_24khz' ) __lowercase= BarkConfig.from_sub_model_configs( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowercase= BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) __lowercase= BarkModel(lowercase__ ) __lowercase= semantic __lowercase= coarseAcoustic __lowercase= fineAcoustic __lowercase= codec __lowercase= bark_generation_config Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') lowerCAmelCase = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
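An illustrative call to the conversion entry point above, matching the `__main__` invocation; the output directory is a placeholder.

# Equivalent CLI (assumed): python <this script> text ./bark-text-hf --is_small
load_model("./bark-text-hf", model_type="text", use_small=True)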
304
lowerCAmelCase = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] lowerCAmelCase = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 
8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
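The arrays above appear to be hand-tuned, strictly decreasing diffusion timestep schedules at various step counts, used in place of evenly spaced steps. A generic sketch of how such a list is consumed during sampling; `denoise_step` is a stand-in for one scheduler/model update, not a real diffusers API.

import torch

schedule = [999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288,
            266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0]

def denoise_step(sample: torch.Tensor, t: int) -> torch.Tensor:
    # Stand-in for one denoising update at timestep t.
    return sample * 0.99

sample = torch.randn(1, 4, 8, 8)  # toy latent
for t in schedule:  # walk the schedule from t=999 down to t=0
    sample = denoise_step(sample, t)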
304
1
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : List[str] =( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCamelCase_ : Any =( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase_ : Any =False UpperCamelCase_ : Dict =False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class in get_values(lowerCAmelCase ): __lowercase= tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=3_2 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= embedding_size def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_input_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , 
self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= TFMobileBertModel(config=lowerCAmelCase ) __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase= model(lowerCAmelCase ) __lowercase= [input_ids, input_mask] __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= TFMobileBertForMaskedLM(config=lowerCAmelCase ) __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= TFMobileBertForNextSentencePrediction(config=lowerCAmelCase ) __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= TFMobileBertForPreTraining(config=lowerCAmelCase ) __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase= model(lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= TFMobileBertForSequenceClassification(config=lowerCAmelCase ) __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_choices __lowercase= TFMobileBertForMultipleChoice(config=lowerCAmelCase ) __lowercase= 
tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase= tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase= tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase= { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= TFMobileBertForTokenClassification(config=lowerCAmelCase ) __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= TFMobileBertForQuestionAnswering(config=lowerCAmelCase ) __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def _A (self ): __lowercase= TFMobileBertModelTest.TFMobileBertModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , hidden_size=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCAmelCase ) @slow def _A (self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: __lowercase= TFMobileBertModel.from_pretrained(lowerCAmelCase ) 
self.assertIsNotNone(lowerCAmelCase ) @require_tf class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' ) __lowercase= tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowercase= model(lowerCAmelCase )[0] __lowercase= [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , lowerCAmelCase ) __lowercase= tf.constant( [ [ [-4.5_91_95_47, -9.24_82_95, -9.64_52_56], [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37], [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase , atol=1E-4 )
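The slow integration test above feeds a fixed id tensor through the pretrained checkpoint and checks the logits shape and a slice of values. A standalone version of the shape check (downloads `google/mobilebert-uncased` on first run):

import tensorflow as tf
from transformers import TFMobileBertForPreTraining

model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]  # prediction logits
print(output.shape)  # (1, 6, 30522): batch, sequence, vocab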
304
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
304
1
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user profile dict from an embedded <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
304
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
304
1
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]: '''simple docstring''' if isinstance(lowercase__ , lowercase__ ): __lowercase= np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ ) else: __lowercase= np.full((len(lowercase__ ), sequence_length) , lowercase__ ) for i, tensor in enumerate(lowercase__ ): if padding_side == "right": if isinstance(lowercase__ , lowercase__ ): __lowercase= tensor[:sequence_length] else: __lowercase= tensor[:sequence_length] else: if isinstance(lowercase__ , lowercase__ ): __lowercase= tensor[:sequence_length] else: __lowercase= tensor[:sequence_length] return out_tensor.tolist() def _lowerCamelCase( lowercase__ ) -> Union[str, Any]: '''simple docstring''' __lowercase= ord(lowercase__ ) if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6): return True __lowercase= unicodedata.category(lowercase__ ) if cat.startswith('P' ): return True return False @dataclass class A ( A_ ): UpperCamelCase_ : PreTrainedTokenizerBase UpperCamelCase_ : Union[bool, str, PaddingStrategy] =True UpperCamelCase_ : Optional[int] =None UpperCamelCase_ : Optional[int] =None UpperCamelCase_ : int =-100 UpperCamelCase_ : str ="pt" def _A (self , lowerCAmelCase ): import torch __lowercase= 'label' if 'label' in features[0].keys() else 'labels' __lowercase= [feature[label_name] for feature in features] if label_name in features[0].keys() else None __lowercase= self.tokenizer.pad( lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , ) if labels is None: return batch __lowercase= torch.tensor(batch['entity_ids'] ).shape[1] __lowercase= self.tokenizer.padding_side if padding_side == "right": __lowercase= [ list(lowerCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase )) for label in labels ] else: __lowercase= [ [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase )) + list(lowerCAmelCase ) for label in labels ] __lowercase= [feature['ner_tags'] for feature in features] __lowercase= padding_tensor(lowerCAmelCase , -1 , lowerCAmelCase , lowerCAmelCase ) __lowercase= [feature['original_entity_spans'] for feature in features] __lowercase= padding_tensor(lowerCAmelCase , (-1, -1) , lowerCAmelCase , lowerCAmelCase ) __lowercase= {k: torch.tensor(lowerCAmelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
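`padding_tensor` above right- or left-pads ragged per-example arrays into one rectangular batch filled with a pad value (the collator calls it with -1 for NER tags and a (-1, -1) tuple for entity spans). A numpy-only sketch of the same idea for the scalar, right-padding case:

import numpy as np

def pad_right(tensors, pad_value, seq_len):
    # Rectangular (batch, seq_len) array; ragged rows truncated or padded on the right.
    out = np.full((len(tensors), seq_len), pad_value)
    for i, t in enumerate(tensors):
        t = np.asarray(t)[:seq_len]
        out[i, : len(t)] = t
    return out

print(pad_right([[1, 2, 3], [4]], -100, 4))
# [[   1    2    3 -100]
#  [   4 -100 -100 -100]]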
304
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int: '''simple docstring''' __lowercase= {} if train_file is not None: __lowercase= [train_file] if eval_file is not None: __lowercase= [eval_file] if test_file is not None: __lowercase= [test_file] __lowercase= datasets.load_dataset('csv' , data_files=lowercase__ ) __lowercase= list(ds[list(files.keys() )[0]].features.keys() ) __lowercase= features_name.pop(lowercase__ ) __lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) ) __lowercase= {label: i for i, label in enumerate(lowercase__ )} __lowercase= tokenizer.model_input_names __lowercase= {} if len(lowercase__ ) == 1: for k in files.keys(): __lowercase= ds[k].map( lambda lowercase__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , ) elif len(lowercase__ ) == 2: for k in files.keys(): __lowercase= ds[k].map( lambda lowercase__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCAmelCase = logging.getLogger(__name__) @dataclass class A : UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains 
the label'''} ) UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} ) UpperCamelCase_ : int =field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def _lowerCamelCase( ) -> Optional[Any]: '''simple docstring''' __lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info( F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' F'16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowercase= AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowercase, __lowercase, __lowercase, __lowercase= get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __lowercase= AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __lowercase= TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , ) def compute_metrics(lowercase__ ) -> Dict: __lowercase= np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __lowercase= TFTrainer( model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase= {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowercase= trainer.evaluate() __lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' ) with open(lowercase__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) results.update(lowercase__ ) return results if __name__ == "__main__": main()
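The script builds its `tf.data` pipelines from generator functions yielding `(features_dict, label)` pairs; its mangled `tf.intaa` presumably stands for `tf.int32`/`tf.int64`. A minimal sketch of that pattern in isolation, with toy data and `tf.int64` assumed:

import tensorflow as tf

def gen():
    yield ({"input_ids": [101, 7592, 102]}, 1)
    yield ({"input_ids": [101, 2088, 102]}, 0)

ds = tf.data.Dataset.from_generator(
    gen,
    ({"input_ids": tf.int64}, tf.int64),                       # output types
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),  # output shapes
)
for features, label in ds:
    print(features["input_ids"].numpy(), int(label))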
304
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCAmelCase = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''albert''' def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ): super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase ) __lowercase= vocab_size __lowercase= embedding_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_hidden_groups __lowercase= num_attention_heads __lowercase= inner_group_num __lowercase= hidden_act __lowercase= intermediate_size __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= initializer_range __lowercase= layer_norm_eps __lowercase= classifier_dropout_prob __lowercase= position_embedding_type class A ( A_ ): @property def _A (self ): if self.task == "multiple-choice": __lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'} else: __lowercase= {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
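The defaults above encode ALBERT's factorized-embedding design: a small `embedding_size` (128) feeding a much larger `hidden_size` (4096), with cross-layer parameter sharing via `num_hidden_groups=1`. Instantiating the config with those defaults:

from transformers import AlbertConfig

config = AlbertConfig()  # defaults as defined in the class above
print(config.embedding_size, config.hidden_size, config.num_hidden_layers)
# 128 4096 12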
304
import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A ( A_ ): def _A (self ): __lowercase= self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= patch_sizes __lowercase= patch_stride __lowercase= patch_padding __lowercase= is_training __lowercase= use_labels __lowercase= num_labels __lowercase= num_channels __lowercase= embed_dim __lowercase= num_heads __lowercase= stride_kv __lowercase= depth __lowercase= cls_token __lowercase= attention_drop_rate __lowercase= initializer_range __lowercase= layer_norm_eps def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.num_labels ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= CvtModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= (self.image_size, self.image_size) __lowercase, __lowercase= image_size[0], image_size[1] for i in range(len(self.depth ) ): __lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= CvtForImageClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else () UpperCamelCase_ : List[str] =( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) UpperCamelCase_ : str =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Any =False UpperCamelCase_ : Union[str, Any] =False UpperCamelCase_ : Tuple =False def _A (self ): __lowercase= CvtModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 ) def _A (self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _A (self ): return @unittest.skip(reason='Cvt does not output attentions' ) def _A (self ): pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def _A (self ): pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def _A (self ): pass def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def _A (self ): def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) ) __lowercase= outputs.hidden_states __lowercase= len(self.model_tester.depth ) self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _A (self ): pass @slow def _A (self ): for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= CvtModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' __lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def _A (self ): return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _A (self ): __lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase= model(**lowerCAmelCase ) # verify the logits __lowercase= torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase ) __lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
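The model tester computes each stage's spatial size with the standard convolution output formula floor((size + 2*padding - kernel)/stride + 1). Checking that arithmetic for the three Cvt stages configured above (image 64, kernels [7, 3, 3], strides [4, 2, 2], paddings [2, 1, 1]):

from math import floor

def conv_out(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride + 1)

size = 64
for k, s, p in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_out(size, k, s, p)
    print(size)  # 16, 8, 4 -> final feature map is 4x4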
304
1
import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''sentencepiece.model'''} lowerCAmelCase = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, } lowerCAmelCase = { '''google/rembert''': 2_5_6, } class A ( A_ ): UpperCamelCase_ : int =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="[CLS]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , **lowerCAmelCase , ): super().__init__( do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= do_lower_case __lowercase= remove_space __lowercase= keep_accents __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor() self.sp_model.Load(lowerCAmelCase ) @property def _A (self ): return len(self.sp_model ) def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d __lowercase= spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def _A (self , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= self.sp_model.EncodeAsPieces(lowerCAmelCase ) return pieces def _A (self , lowerCAmelCase ): return self.sp_model.PieceToId(lowerCAmelCase ) def _A (self , lowerCAmelCase ): return self.sp_model.IdToPiece(lowerCAmelCase ) def _A (self , lowerCAmelCase ): __lowercase= self.sp_model.decode_pieces(lowerCAmelCase ) return out_string def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.sep_token_id] __lowercase= [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1] return [1] + ([0] * len(lowerCAmelCase )) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.sep_token_id] __lowercase= [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error('Vocabulary path ({}) should be a directory'.format(lowerCAmelCase ) ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file , lowerCAmelCase ) return (out_vocab_file,)
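`build_inputs_with_special_tokens` above wraps sequences as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`, and the mask/type-id helpers follow the same layout. A toy sketch of that layout with made-up ids (real cls/sep ids depend on the tokenizer's vocabulary):

def build_inputs(token_ids_a, token_ids_b=None, cls_id=0, sep_id=1):
    # [CLS] A [SEP]  or  [CLS] A [SEP] B [SEP]
    cls, sep = [cls_id], [sep_id]
    if token_ids_b is None:
        return cls + token_ids_a + sep
    return cls + token_ids_a + sep + token_ids_b + sep

print(build_inputs([5, 6]))       # [0, 5, 6, 1]
print(build_inputs([5, 6], [7]))  # [0, 5, 6, 1, 7, 1]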
304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
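`_LazyModule` defers importing the heavy `modeling_mra` module until one of its attributes is first accessed. A simplified stand-in for that pattern using module-level `__getattr__` (PEP 562) — a sketch of the idea, not transformers' actual `_LazyModule` implementation:

# lazy_pkg/__init__.py -- simplified sketch of lazy attribute-driven imports
import importlib

_import_structure = {"heavy_submodule": ["ExpensiveClass"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        # Import the submodule only when its attribute is first requested.
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")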
304
1
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
304
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: ''' lowerCAmelCase = '''======= >>>>>>> ''' lowerCAmelCase = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] lowerCAmelCase = [ # (pattern, replacement) # Order is important here for some replacements (R'''tfds\.core''', R'''datasets'''), (R'''tf\.io\.gfile\.GFile''', R'''open'''), (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''), (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''), (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''), (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''), (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''), (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (R'''tfds\.''', R'''datasets.'''), (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''), (R'''self\.builder_config''', R'''self.config'''), ] def _lowerCamelCase( lowercase__ ) -> Optional[int]: '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class A ( A_ ): @staticmethod def _A (lowerCAmelCase ): __lowercase= parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' ) train_parser.set_defaults(func=lowerCAmelCase ) def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= get_logger('datasets-cli/converting' ) __lowercase= tfds_path __lowercase= datasets_directory def _A (self ): if os.path.isdir(self._tfds_path ): __lowercase= os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __lowercase= os.path.dirname(self._tfds_path ) else: raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' 
) __lowercase= os.path.abspath(self._datasets_directory ) self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' ) __lowercase= [] __lowercase= [] __lowercase= {} if os.path.isdir(self._tfds_path ): __lowercase= os.listdir(lowerCAmelCase ) else: __lowercase= [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'Looking at file {f_name}' ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file' ) continue with open(lowerCAmelCase , encoding='utf-8' ) as f: __lowercase= f.readlines() __lowercase= [] __lowercase= False __lowercase= False __lowercase= [] for line in lines: __lowercase= line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __lowercase= 'import datasets\n' elif "import tensorflow" in out_line: # order is important here __lowercase= '' continue elif "from absl import logging" in out_line: __lowercase= 'from datasets import logging\n' elif "getLogger" in out_line: __lowercase= out_line.replace('getLogger' , 'get_logger' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __lowercase= True __lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' ) out_lines.append(lowerCAmelCase ) out_lines.append(lowerCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: __lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) ) __lowercase= 'from . import ' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'Error converting {out_line.strip()}' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __lowercase= True out_lines.append(lowerCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __lowercase= f_name.replace('.py' , '' ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) self._logger.info(f'Adding directory {output_dir}' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCAmelCase ) if needs_manual_update: with_manual_update.append(lowerCAmelCase ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.writelines(lowerCAmelCase ) self._logger.info(f'Converted in {output_file}' ) for utils_file in utils_files: try: __lowercase= os.path.basename(lowerCAmelCase ) __lowercase= imports_to_builder_map[f_name.replace('.py' , '' )] self._logger.info(f'Moving {dest_folder} to {utils_file}' ) shutil.copy(lowerCAmelCase , lowerCAmelCase ) except KeyError: self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' 
) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
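The converter works line by line, applying the ordered `TO_CONVERT` regex pairs; as the file's own comment notes, order matters because e.g. `tfds.features.Text()` must be rewritten before the catch-all `tfds.` rule fires. One rule applied in isolation:

import re

line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
line = re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')", line)
print(line)
# features=tfds.features.FeaturesDict({'text': datasets.Value('string')})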
304
1
import collections import os import re from pathlib import Path lowerCAmelCase = '''src/transformers''' # Matches is_xxx_available() lowerCAmelCase = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} lowerCAmelCase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available lowerCAmelCase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase = re.compile(R'''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase = re.compile(R'''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo lowerCAmelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: lowerCAmelCase = re.compile(R'''^\s*try:''') # Catches a line with else: lowerCAmelCase = re.compile(R'''^\s*else:''') def _lowerCamelCase( lowercase__ ) -> str: '''simple docstring''' if _re_test_backend.search(lowercase__ ) is None: return None __lowercase= [b[0] for b in _re_backend.findall(lowercase__ )] backends.sort() return "_and_".join(lowercase__ ) def _lowerCamelCase( lowercase__ ) -> Any: '''simple docstring''' with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase= f.readlines() __lowercase= 0 while line_index < len(lowercase__ ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowercase__ ): return None # First grab the objects without a specific backend in _import_structure __lowercase= [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: __lowercase= lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowercase__ ): __lowercase= _re_one_line_import_struct.search(lowercase__ ).groups()[0] __lowercase= re.findall(R'\[([^\]]+)\]' , lowercase__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue __lowercase= _re_import_struct_key_value.search(lowercase__ ) if single_line_import_search is not None: __lowercase= [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowercase__ ) > 0] objects.extend(lowercase__ ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 __lowercase= {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__lowercase= find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase= None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase= [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): __lowercase= lines[line_index] if _re_import_struct_add_one.search(lowercase__ ) is not None: objects.append(_re_import_struct_add_one.search(lowercase__ ).groups()[0] ) elif _re_import_struct_add_many.search(lowercase__ ) is not None: __lowercase= _re_import_struct_add_many.search(lowercase__ ).groups()[0].split(', ' ) __lowercase= [obj[1:-1] for obj in imports if len(lowercase__ ) > 0] objects.extend(lowercase__ ) elif _re_between_brackets.search(lowercase__ ) is not None: __lowercase= _re_between_brackets.search(lowercase__ ).groups()[0].split(', ' ) __lowercase= [obj[1:-1] for obj in imports if len(lowercase__ ) > 0] objects.extend(lowercase__ ) elif _re_quote_object.search(lowercase__ ) is not None: objects.append(_re_quote_object.search(lowercase__ ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 1_2 + '"' ): objects.append(line[1_3:-3] ) line_index += 1 __lowercase= objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __lowercase= [] while ( line_index < len(lowercase__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): __lowercase= lines[line_index] __lowercase= _re_import.search(lowercase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 __lowercase= {'none': objects} # Let's continue with backend-specific objects while line_index < len(lowercase__ ): # If the line is an if is_backend_available, we grab all objects associated. 
__lowercase= find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase= None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase= [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): __lowercase= lines[line_index] __lowercase= _re_import.search(lowercase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 __lowercase= objects else: line_index += 1 return import_dict_objects, type_hint_objects def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]: '''simple docstring''' def find_duplicates(lowercase__ ): return [k for k, v in collections.Counter(lowercase__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __lowercase= [] for key in import_dict_objects.keys(): __lowercase= find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' ) __lowercase= find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __lowercase= 'base imports' if key == 'none' else F'{key} backend' errors.append(F'Differences for {name}:' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F' {a} in TYPE_HINT but not in _import_structure.' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F' {a} in _import_structure but not in TYPE_HINT.' ) return errors def _lowerCamelCase( ) -> Union[str, Any]: '''simple docstring''' __lowercase= [] for root, _, files in os.walk(lowercase__ ): if "__init__.py" in files: __lowercase= os.path.join(lowercase__ , '__init__.py' ) __lowercase= parse_init(lowercase__ ) if objects is not None: __lowercase= analyze_results(*lowercase__ ) if len(lowercase__ ) > 0: __lowercase= F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}' failures.append('\n'.join(lowercase__ ) ) if len(lowercase__ ) > 0: raise ValueError('\n\n'.join(lowercase__ ) ) def _lowerCamelCase( ) -> str: '''simple docstring''' __lowercase= [] for path, directories, files in os.walk(lowercase__ ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(lowercase__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowercase__ ) / folder).glob('*.py' ) ) ) == 0: continue __lowercase= str((Path(lowercase__ ) / folder).relative_to(lowercase__ ) ) __lowercase= short_path.replace(os.path.sep , '.' ) submodules.append(lowercase__ ) for fname in files: if fname == "__init__.py": continue __lowercase= str((Path(lowercase__ ) / fname).relative_to(lowercase__ ) ) __lowercase= short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' 
) ) == 1:
                submodules.append(lowercase__ )
    return submodules


lowerCAmelCase = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
    '''models.esm.openfold_utils''',
]


def _lowerCamelCase( ) -> Optional[int]:
    '''simple docstring'''
    from transformers.utils import direct_transformers_import

    __lowercase= direct_transformers_import(lowercase__ )
    __lowercase= set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(lowercase__ , '__init__.py' ) , 'r' ) as f:
        __lowercase= f.read()
    import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , lowercase__ ) ) )
    __lowercase= [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(lowercase__ ) > 0:
        __lowercase= '\n'.join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
304
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCAmelCase = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''albert''' def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ): super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase ) __lowercase= vocab_size __lowercase= embedding_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_hidden_groups __lowercase= num_attention_heads __lowercase= inner_group_num __lowercase= hidden_act __lowercase= intermediate_size __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= initializer_range __lowercase= layer_norm_eps __lowercase= classifier_dropout_prob __lowercase= position_embedding_type class A ( A_ ): @property def _A (self ): if self.task == "multiple-choice": __lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'} else: __lowercase= {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
304
1
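The backend-detection logic in the init checker above boils down to two of those regexes. Below is a minimal, self-contained sketch of how a dependency-guard line maps to the sorted `_and_`-joined backend key; the names `_re_backend` and `_re_test_backend` are the assumed upstream ones, since the dump rebinds every module-level constant to `lowerCAmelCase`.

import re

# Same patterns as defined above, under their assumed upstream names.
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")


def find_backend_sketch(line: str):
    # Mirrors find_backend above: None for ordinary lines, else a sorted backend key.
    if _re_test_backend.search(line) is None:
        return None
    backends = sorted(b[0] for b in _re_backend.findall(line))
    return "_and_".join(backends)


print(find_backend_sketch("if not is_torch_available() and not is_tokenizers_available():"))
# -> tokenizers_and_torch
print(find_backend_sketch("x = 1"))  # -> None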
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
304
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' __lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' ) __lowercase= transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) __lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ ) return image def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' if "visual_encoder" in key: __lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ ) if "blocks" in key: __lowercase= re.sub(R'blocks' , 'layers' , lowercase__ ) if "attn" in key: __lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ ) if "norm1" in key: __lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ ) if "norm2" in key: __lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ ) if "encoder.norm" in key: __lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ ) if "encoder.patch_embed.proj" in key: __lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ ) if "encoder.pos_embed" in key: __lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ ) if "encoder.cls_token" in key: __lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ ) if "self_attn" in key: __lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ ) return key @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int: '''simple docstring''' if config_path is not None: __lowercase= BlipConfig.from_pretrained(lowercase__ ) else: __lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} ) __lowercase= BlipForConditionalGeneration(lowercase__ ).eval() __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' __lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' ) __lowercase= pt_model.eval() __lowercase= pt_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value hf_model.load_state_dict(lowercase__ ) __lowercase= 3_8_4 __lowercase= load_demo_image(image_size=lowercase__ , device='cpu' ) __lowercase= BertTokenizer.from_pretrained('bert-base-uncased' ) __lowercase= tokenizer(['a picture of'] ).input_ids __lowercase= hf_model.generate(lowercase__ , lowercase__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] __lowercase= hf_model.generate(lowercase__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] if 
pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(lowercase__ )

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    __lowercase= (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    __lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
    vqa_model.eval()
    __lowercase= vqa_model.state_dict()
    for key in modified_state_dict.copy():
        __lowercase= modified_state_dict.pop(lowercase__ )
        __lowercase= rename_key(lowercase__ )
        __lowercase= value
    __lowercase= BlipForQuestionAnswering(lowercase__ )
    hf_vqa_model.load_state_dict(lowercase__ )
    __lowercase= ['How many dogs are in this image?']
    __lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
    __lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )

    __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    __lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
    itm_model.eval()
    __lowercase= itm_model.state_dict()
    for key in modified_state_dict.copy():
        __lowercase= modified_state_dict.pop(lowercase__ )
        __lowercase= rename_key(lowercase__ )
        __lowercase= value
    __lowercase= BlipForImageTextRetrieval(lowercase__ )
    __lowercase= ['A picture of a woman with a dog sitting in a beach']
    __lowercase= tokenizer(
        lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
    hf_itm_model.load_state_dict(lowercase__ )
    hf_itm_model.eval()
    __lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
    __lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
    assert out[0].item() == 0.2110_6874_9427_7954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )


if __name__ == "__main__":
    lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    lowerCAmelCase = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
304
1
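The `_LazyModule` wiring above defers the heavy `modeling_mra` import until one of its names is first touched. As a sketch of the underlying mechanism only (PEP 562 module `__getattr__`; this is not the actual `_LazyModule` implementation, and `lazy_pkg`/`heavy_module`/`HeavyClass` are hypothetical names):

# lazy_pkg/__init__.py -- hypothetical package illustrating lazy attribute loading
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Import the submodule only when one of its attributes is first requested.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")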
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
304
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1) lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2) @dataclass class A : UpperCamelCase_ : int UpperCamelCase_ : Node | None class A : def __init__(self , lowerCAmelCase ): __lowercase= None for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ): __lowercase= Node(lowerCAmelCase , self.head ) def __iter__(self ): __lowercase= self.head while node: yield node.data __lowercase= node.next_node def __len__(self ): return sum(1 for _ in self ) def __str__(self ): return " -> ".join([str(lowerCAmelCase ) for node in self] ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList: '''simple docstring''' return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
304
1
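A quick illustration of the merge semantics of the linked list above, written against the assumed upstream names `SortedLinkedList` and `merge_lists` (the dump rebinds them to `A` and `_lowerCamelCase`). The constructor sorts in descending order and inserts at the head, so traversal is ascending and the merge returns the sorted union, duplicates included; note the constructor passes the collection itself as `reverse`, which is truthy for any non-empty input.

odd = SortedLinkedList([3, 9, -11, 0, 7, 5, 1, -1])
even = SortedLinkedList([4, 6, 2, 0, 8, 10, 3, -2])
print(merge_lists(odd, even))
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10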
def _lowerCamelCase( lowercase__ ) -> list[list]: '''simple docstring''' __lowercase= current_set.copy() for row_index, row in enumerate(lowercase__ ): __lowercase= row[0] for column_index, column in enumerate(lowercase__ ): if magnitude == 0: __lowercase= column continue __lowercase= column / magnitude # Subtract to cancel term __lowercase= current_set[0] __lowercase= [first_row] __lowercase= current_set[1::] for row in current_set: __lowercase= [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(lowercase__ ) continue for column_index in range(len(lowercase__ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(lowercase__ ) # Create next recursion iteration set if len(final_set[0] ) != 3: __lowercase= final_set[0] __lowercase= [] __lowercase= [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) __lowercase= simplify(lowercase__ ) for i in range(len(lowercase__ ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , lowercase__ ) __lowercase= resultant return final_set def _lowerCamelCase( lowercase__ ) -> list: '''simple docstring''' if len(lowercase__ ) == 0: raise IndexError('solve_simultaneous() requires n lists of length n+1' ) __lowercase= len(lowercase__ ) + 1 if any(len(lowercase__ ) != _length for item in equations ): raise IndexError('solve_simultaneous() requires n lists of length n+1' ) for row in equations: if any(not isinstance(lowercase__ , (int, float) ) for column in row ): raise ValueError('solve_simultaneous() requires lists of integers' ) if len(lowercase__ ) == 1: return [equations[0][-1] / equations[0][0]] __lowercase= equations.copy() if any(0 in row for row in data_set ): __lowercase= data_set.copy() __lowercase= [] for row_index, row in enumerate(lowercase__ ): if 0 not in row: __lowercase= data_set.pop(lowercase__ ) break if not full_row: raise ValueError('solve_simultaneous() requires at least 1 full equation' ) data_set.insert(0 , lowercase__ ) __lowercase= data_set.copy() __lowercase= simplify(lowercase__ ) __lowercase= simplified[::-1] __lowercase= [] for row in simplified: __lowercase= row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue __lowercase= row.copy()[: len(lowercase__ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(lowercase__ ) == 0: solutions.append(0 ) continue __lowercase= temp_row[1::] __lowercase= temp_row[::-1] for column_index, column in enumerate(lowercase__ ): current_solution -= column * solutions[column_index] solutions.append(lowercase__ ) __lowercase= [] for item in solutions: final.append(float(round(lowercase__ , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
304
from __future__ import annotations from collections.abc import Callable def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float: '''simple docstring''' __lowercase= x_start __lowercase= fnc(lowercase__ ) __lowercase= 0.0 for _ in range(lowercase__ ): # Approximates small segments of curve as linear and solve # for trapezoidal area __lowercase= (x_end - x_start) / steps + xa __lowercase= fnc(lowercase__ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step __lowercase= xa __lowercase= fxa return area if __name__ == "__main__": def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' return x**3 + x**2 print('''f(x) = x^3 + x^2''') print('''The area between the curve, x = -5, x = 5 and the x axis is:''') lowerCAmelCase = 1_0 while i <= 1_0_0_0_0_0: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 1_0
304
1
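A small worked example for `solve_simultaneous` above: each inner list holds one equation's coefficients followed by its constant, so `[[2, 1, 4], [1, 2, 5]]` encodes 2x + y = 4 and x + 2y = 5, whose solution is x = 1, y = 2.

print(solve_simultaneous([[2, 1, 4], [1, 2, 5]]))  # [1.0, 2.0]
print(solve_simultaneous([[4, 2]]))                # [0.5]  (single equation 4x = 2)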
lowerCAmelCase = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] lowerCAmelCase = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 
8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
304
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_lengths __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= gelu_activation __lowercase= sinusoidal_embeddings __lowercase= causal __lowercase= asm __lowercase= n_langs __lowercase= vocab_size __lowercase= n_special __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= summary_type __lowercase= use_proj __lowercase= scope __lowercase= bos_token_id def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None if self.use_input_lengths: __lowercase= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , 2 ).float() __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A (self ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) __lowercase= outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , ) ((__lowercase), )= result_with_labels.to_tuple() __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) ((__lowercase), )= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase 
) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_labels __lowercase= XLMForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_choices __lowercase= XLMForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : int =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase_ : Dict =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCamelCase_ : str =( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= XLMModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= min_length + idx + 1 __lowercase= ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , ) pass @slow def _A (self ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= 
XLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president __lowercase= [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
304
1
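The integer tables above are hand-picked sampling schedules, each strictly decreasing from 999 down to 0. A small sanity check of that invariant, written against a literal copy of the shortest table since the dump rebinds every table to `lowerCAmelCase`:

schedule_27 = [999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288,
               266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0]

assert schedule_27[0] == 999 and schedule_27[-1] == 0
assert all(a > b for a, b in zip(schedule_27, schedule_27[1:]))  # strictly decreasing
print(len(schedule_27))  # 27 steps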
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A ( A_ ): UpperCamelCase_ : str =['''image_processor''', '''tokenizer'''] UpperCamelCase_ : Dict ='''ChineseCLIPImageProcessor''' UpperCamelCase_ : Union[str, Any] =('''BertTokenizer''', '''BertTokenizerFast''') def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCAmelCase , ) __lowercase= kwargs.pop('feature_extractor' ) __lowercase= image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowerCAmelCase , lowerCAmelCase ) __lowercase= self.image_processor def __call__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ): if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: __lowercase= self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase ) if images is not None: __lowercase= self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase ) if text is not None and images is not None: __lowercase= image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase ) def _A (self , *lowerCAmelCase , **lowerCAmelCase ): return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase ) def _A (self , *lowerCAmelCase , **lowerCAmelCase ): return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase ) @property def _A (self ): __lowercase= self.tokenizer.model_input_names __lowercase= self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _A (self ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase , ) return self.image_processor_class
304
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase = {'''UserAgent''': UserAgent().random} def _lowerCamelCase( lowercase__ ) -> dict: '''simple docstring''' __lowercase= script.contents[0] __lowercase= json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A : def __init__(self , lowerCAmelCase ): __lowercase= f'https://www.instagram.com/{username}/' __lowercase= self.get_json() def _A (self ): __lowercase= requests.get(self.url , headers=lowerCAmelCase ).text __lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__(self ): return f'{self.__class__.__name__}(\'{self.username}\')' def __str__(self ): return f'{self.fullname} ({self.username}) is {self.biography}' @property def _A (self ): return self.user_data["username"] @property def _A (self ): return self.user_data["full_name"] @property def _A (self ): return self.user_data["biography"] @property def _A (self ): return self.user_data["business_email"] @property def _A (self ): return self.user_data["external_url"] @property def _A (self ): return self.user_data["edge_followed_by"]["count"] @property def _A (self ): return self.user_data["edge_follow"]["count"] @property def _A (self ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _A (self ): return self.user_data["profile_pic_url_hd"] @property def _A (self ): return self.user_data["is_verified"] @property def _A (self ): return self.user_data["is_private"] def _lowerCamelCase( lowercase__ = "github" ) -> None: '''simple docstring''' import os if os.environ.get('CI' ): return # test failing on GitHub Actions __lowercase= InstagramUser(lowercase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowercase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase = InstagramUser('''github''') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
304
1
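A usage sketch for the processor above, under two assumptions: the upstream class name `ChineseCLIPProcessor` (the dump renames it to `A`), and a publicly hosted checkpoint (here `OFA-Sys/chinese-clip-vit-base-patch16` is assumed; substitute any compatible one).

from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.new("RGB", (224, 224))  # placeholder image
inputs = processor(text=["一张照片"], images=image, return_tensors="pt")  # "a photo"
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids
# Text-only and image-only calls are also valid; passing neither raises ValueError.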
import logging from transformers.configuration_utils import PretrainedConfig lowerCAmelCase = logging.getLogger(__name__) class A ( A_ ): UpperCamelCase_ : Tuple ='''masked_bert''' def __init__(self , lowerCAmelCase=3_0_5_2_2 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0 , lowerCAmelCase="topK" , lowerCAmelCase="constant" , lowerCAmelCase=0.0 , **lowerCAmelCase , ): super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase ) __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_act __lowercase= intermediate_size __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= initializer_range __lowercase= layer_norm_eps __lowercase= pruning_method __lowercase= mask_init __lowercase= mask_scale
304
from typing import Any import numpy as np def _lowerCamelCase( lowercase__ ) -> bool: '''simple docstring''' return np.array_equal(lowercase__ , matrix.conjugate().T ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any: '''simple docstring''' __lowercase= v.conjugate().T __lowercase= v_star.dot(lowercase__ ) assert isinstance(lowercase__ , np.ndarray ) return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ )) def _lowerCamelCase( ) -> None: '''simple docstring''' __lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) __lowercase= np.array([[1], [2], [3]] ) assert is_hermitian(lowercase__ ), F'{a} is not hermitian.' print(rayleigh_quotient(lowercase__ , lowercase__ ) ) __lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(lowercase__ ), F'{a} is not hermitian.' assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
304
1
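A numeric illustration of `rayleigh_quotient` above (assumed upstream names; the dump defines both helpers as `_lowerCamelCase`): for a Hermitian matrix, the quotient of an eigenvector recovers its eigenvalue. A real diagonal matrix keeps the eigenpairs obvious.

import numpy as np

a = np.array([[2, 0], [0, 3]])
v = np.array([[1], [0]])  # eigenvector for eigenvalue 2
assert is_hermitian(a)
print(rayleigh_quotient(a, v))  # [[2.]]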
import colorsys from PIL import Image # type: ignore def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> float: '''simple docstring''' __lowercase= x __lowercase= y for step in range(lowercase__ ): # noqa: B007 __lowercase= a * a - b * b + x __lowercase= 2 * a * b + y __lowercase= a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _lowerCamelCase( lowercase__ ) -> tuple: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (2_5_5, 2_5_5, 2_5_5) def _lowerCamelCase( lowercase__ ) -> tuple: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(lowercase__ , 1 , 1 ) ) def _lowerCamelCase( lowercase__ = 8_0_0 , lowercase__ = 6_0_0 , lowercase__ = -0.6 , lowercase__ = 0 , lowercase__ = 3.2 , lowercase__ = 5_0 , lowercase__ = True , ) -> Image.Image: '''simple docstring''' __lowercase= Image.new('RGB' , (image_width, image_height) ) __lowercase= img.load() # loop through the image-coordinates for image_x in range(lowercase__ ): for image_y in range(lowercase__ ): # determine the figure-coordinates based on the image-coordinates __lowercase= figure_width / image_width * image_height __lowercase= figure_center_x + (image_x / image_width - 0.5) * figure_width __lowercase= figure_center_y + (image_y / image_height - 0.5) * figure_height __lowercase= get_distance(lowercase__ , lowercase__ , lowercase__ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: __lowercase= get_color_coded_rgb(lowercase__ ) else: __lowercase= get_black_and_white_rgb(lowercase__ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure lowerCAmelCase = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
304
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask'''] def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ): super().__init__( feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= spectrogram_length __lowercase= num_channels __lowercase= patch_size __lowercase= feature_size // self.patch_size[1] __lowercase= n_fft __lowercase= sampling_rate // hop_length_to_sampling_rate __lowercase= sampling_rate __lowercase= padding_value __lowercase= mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T def _A (self , lowerCAmelCase ): __lowercase= spectrogram( lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , ) __lowercase= log_spec[:, :-1] __lowercase= log_spec - 20.0 __lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __lowercase= is_batched_numpy or ( isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ): __lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa ) elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowercase= raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowercase= [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __lowercase= [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , lowerCAmelCase ): __lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __lowercase= max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __lowercase= [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __lowercase= np.array(lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding __lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __lowercase= padded_audio_features * self.padding_value for i in range(len(lowerCAmelCase ) ): __lowercase= audio_features[i] __lowercase= feature # return as BatchFeature if return_attention_mask: __lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: __lowercase= {'audio_values': padded_audio_features} __lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase ) return encoded_inputs
304
1
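Two boundary cases make the distance normalization in `get_distance` above concrete (assumed upstream name; arguments are positional because the dump renames every parameter to `lowercase__`): the origin never diverges, so the loop runs all 50 steps and returns 1.0, while a point far outside the set escapes at step 0.

print(get_distance(0, 0, 50))  # 1.0 -> rendered black by both coloring functions
print(get_distance(3, 3, 50))  # 0.0 -> diverges immediately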
import warnings


warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
    '''`from accelerate import find_executable_batch_size` to avoid this warning.''',
    FutureWarning,
)
304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' __lowercase= create_tensor(lowercase__ ) __lowercase= gather(lowercase__ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' __lowercase= [state.process_index] __lowercase= gather_object(lowercase__ ) assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}' assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}' def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' __lowercase= create_tensor(lowercase__ ) __lowercase= broadcast(lowercase__ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def _lowerCamelCase( lowercase__ ) -> List[Any]: '''simple docstring''' if state.is_main_process: __lowercase= torch.arange(state.num_processes + 1 ).to(state.device ) else: __lowercase= torch.arange(state.num_processes ).to(state.device ) __lowercase= pad_across_processes(lowercase__ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def _lowerCamelCase( lowercase__ ) -> Any: '''simple docstring''' if state.num_processes != 2: return __lowercase= create_tensor(lowercase__ ) __lowercase= reduce(lowercase__ , 'sum' ) __lowercase= torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}' def _lowerCamelCase( lowercase__ ) -> Union[str, Any]: '''simple docstring''' if state.num_processes != 2: return __lowercase= create_tensor(lowercase__ ) __lowercase= reduce(lowercase__ , 'mean' ) __lowercase= torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}' def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' main() def _lowerCamelCase( ) -> List[str]: '''simple docstring''' __lowercase= PartialState() state.print(F'State: {state}' ) state.print('testing gather' ) test_gather(lowercase__ ) state.print('testing gather_object' ) test_gather_object(lowercase__ ) state.print('testing broadcast' ) test_broadcast(lowercase__ ) state.print('testing pad_across_processes' ) test_pad_across_processes(lowercase__ ) state.print('testing reduce_sum' ) test_reduce_sum(lowercase__ ) state.print('testing reduce_mean' ) 
test_reduce_mean(lowercase__ ) if __name__ == "__main__": main()
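# Illustration (not part of the test file above) of what `gather` returns when
# the script is launched with two processes, e.g. via `accelerate launch`.
# The values follow from `create_tensor`: process 0 holds [1., 2.] and
# process 1 holds [3., 4.], so every process sees the concatenation:
#
#   process 0: create_tensor -> tensor([1., 2.])
#   process 1: create_tensor -> tensor([3., 4.])
#   gather(...) on either    -> tensor([1., 2., 3., 4.])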
304
1
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig lowerCAmelCase = logging.get_logger(__name__) # General docstring lowerCAmelCase = '''MobileNetV1Config''' # Base docstring lowerCAmelCase = '''google/mobilenet_v1_1.0_224''' lowerCAmelCase = [1, 1_0_2_4, 7, 7] # Image classification docstring lowerCAmelCase = '''google/mobilenet_v1_1.0_224''' lowerCAmelCase = '''tabby, tabby cat''' lowerCAmelCase = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=None ) -> Union[str, Any]: '''simple docstring''' __lowercase= {} if isinstance(lowercase__ , lowercase__ ): __lowercase= model.mobilenet_va else: __lowercase= model __lowercase= 'MobilenetV1/Conv2d_0/' __lowercase= backbone.conv_stem.convolution.weight __lowercase= backbone.conv_stem.normalization.bias __lowercase= backbone.conv_stem.normalization.weight __lowercase= backbone.conv_stem.normalization.running_mean __lowercase= backbone.conv_stem.normalization.running_var for i in range(1_3 ): __lowercase= i + 1 __lowercase= i * 2 __lowercase= backbone.layer[pt_index] __lowercase= F'MobilenetV1/Conv2d_{tf_index}_depthwise/' __lowercase= pointer.convolution.weight __lowercase= pointer.normalization.bias __lowercase= pointer.normalization.weight __lowercase= pointer.normalization.running_mean __lowercase= pointer.normalization.running_var __lowercase= backbone.layer[pt_index + 1] __lowercase= F'MobilenetV1/Conv2d_{tf_index}_pointwise/' __lowercase= pointer.convolution.weight __lowercase= pointer.normalization.bias __lowercase= pointer.normalization.weight __lowercase= pointer.normalization.running_mean __lowercase= pointer.normalization.running_var if isinstance(lowercase__ , lowercase__ ): __lowercase= 'MobilenetV1/Logits/Conv2d_1c_1x1/' __lowercase= model.classifier.weight __lowercase= model.classifier.bias return tf_to_pt_map def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> int: '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __lowercase= tf.train.list_variables(lowercase__ ) __lowercase= {} for name, shape in init_vars: logger.info(F'Loading TF weight {name} with shape {shape}' ) __lowercase= tf.train.load_variable(lowercase__ , lowercase__ ) __lowercase= array # Build TF to PyTorch weights loading map __lowercase= _build_tf_to_pytorch_map(lowercase__ , lowercase__ , lowercase__ ) for name, pointer in tf_to_pt_map.items(): logger.info(F'Importing {name}' ) if name not in tf_weights: logger.info(F'{name} not in tf pre-trained weights, skipping' ) continue __lowercase= tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __lowercase= np.transpose(lowercase__ , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __lowercase= array.squeeze().transpose() else: __lowercase= np.transpose(lowercase__ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' ) logger.info(F'Initialize PyTorch weight {name} {array.shape}' ) __lowercase= torch.from_numpy(lowercase__ ) tf_weights.pop(lowercase__ , lowercase__ ) tf_weights.pop(name + '/RMSProp' , lowercase__ ) tf_weights.pop(name + '/RMSProp_1' , lowercase__ ) tf_weights.pop(name + '/ExponentialMovingAverage' , lowercase__ ) logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' ) return model def _lowerCamelCase( lowercase__ , lowercase__ ) -> torch.Tensor: '''simple docstring''' __lowercase, __lowercase= features.shape[-2:] __lowercase, __lowercase= conv_layer.stride __lowercase, __lowercase= conv_layer.kernel_size if in_height % stride_height == 0: __lowercase= max(kernel_height - stride_height , 0 ) else: __lowercase= max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __lowercase= max(kernel_width - stride_width , 0 ) else: __lowercase= max(kernel_width - (in_width % stride_width) , 0 ) __lowercase= pad_along_width // 2 __lowercase= pad_along_width - pad_left __lowercase= pad_along_height // 2 __lowercase= pad_along_height - pad_top __lowercase= (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(lowercase__ , lowercase__ , 'constant' , 0.0 ) class A ( nn.Module ): def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1 , lowerCAmelCase = 1 , lowerCAmelCase = False , lowerCAmelCase = True , lowerCAmelCase = True , ): super().__init__() __lowercase= config if in_channels % groups != 0: raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.' ) if out_channels % groups != 0: raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.' 
) __lowercase= 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __lowercase= nn.Convad( in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , kernel_size=lowerCAmelCase , stride=lowerCAmelCase , padding=lowerCAmelCase , groups=lowerCAmelCase , bias=lowerCAmelCase , padding_mode='zeros' , ) if use_normalization: __lowercase= nn.BatchNormad( num_features=lowerCAmelCase , eps=config.layer_norm_eps , momentum=0.99_97 , affine=lowerCAmelCase , track_running_stats=lowerCAmelCase , ) else: __lowercase= None if use_activation: if isinstance(lowerCAmelCase , lowerCAmelCase ): __lowercase= ACTaFN[use_activation] elif isinstance(config.hidden_act , lowerCAmelCase ): __lowercase= ACTaFN[config.hidden_act] else: __lowercase= config.hidden_act else: __lowercase= None def _A (self , lowerCAmelCase ): if self.config.tf_padding: __lowercase= apply_tf_padding(lowerCAmelCase , self.convolution ) __lowercase= self.convolution(lowerCAmelCase ) if self.normalization is not None: __lowercase= self.normalization(lowerCAmelCase ) if self.activation is not None: __lowercase= self.activation(lowerCAmelCase ) return features class A ( A_ ): UpperCamelCase_ : Optional[int] =MobileNetVaConfig UpperCamelCase_ : str =load_tf_weights_in_mobilenet_va UpperCamelCase_ : Union[str, Any] ='''mobilenet_v1''' UpperCamelCase_ : str ='''pixel_values''' UpperCamelCase_ : List[str] =False def _A (self , lowerCAmelCase ): if isinstance(lowerCAmelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(lowerCAmelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) lowerCAmelCase = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCAmelCase = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , A_ , ) class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase = True ): super().__init__(lowerCAmelCase ) __lowercase= config __lowercase= 3_2 __lowercase= max(int(depth * config.depth_multiplier ) , config.min_depth ) __lowercase= MobileNetVaConvLayer( lowerCAmelCase , in_channels=config.num_channels , out_channels=lowerCAmelCase , kernel_size=3 , stride=2 , ) __lowercase= [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __lowercase= nn.ModuleList() for i in range(1_3 ): __lowercase= out_channels if strides[i] == 2 or i == 0: depth *= 2 __lowercase= max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( lowerCAmelCase , in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase , ) ) self.layer.append( MobileNetVaConvLayer( lowerCAmelCase , in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , kernel_size=1 , ) ) __lowercase= nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _A (self , lowerCAmelCase ): raise NotImplementedError @add_start_docstrings_to_model_forward(lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _A (self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ): __lowercase= ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase= return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __lowercase= self.conv_stem(lowerCAmelCase ) __lowercase= () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __lowercase= layer_module(lowerCAmelCase ) if output_hidden_states: __lowercase= all_hidden_states + (hidden_states,) __lowercase= hidden_states if self.pooler is not None: __lowercase= torch.flatten(self.pooler(lowerCAmelCase ) , start_dim=1 ) else: __lowercase= None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase , pooler_output=lowerCAmelCase , hidden_states=lowerCAmelCase , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , A_ , ) class A ( A_ ): def __init__(self , lowerCAmelCase ): super().__init__(lowerCAmelCase ) __lowercase= config.num_labels __lowercase= MobileNetVaModel(lowerCAmelCase ) __lowercase= self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __lowercase= nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase ) __lowercase= nn.Linear(lowerCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _A (self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ): __lowercase= return_dict if return_dict is not None else self.config.use_return_dict __lowercase= self.mobilenet_va(lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase ) __lowercase= outputs.pooler_output if return_dict else outputs[1] __lowercase= self.classifier(self.dropout(lowerCAmelCase ) ) __lowercase= None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase= 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase= 'single_label_classification' else: __lowercase= 'multi_label_classification' if self.config.problem_type == "regression": __lowercase= MSELoss() if self.num_labels == 1: __lowercase= loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowercase= loss_fct(lowerCAmelCase , lowerCAmelCase ) elif self.config.problem_type == "single_label_classification": __lowercase= CrossEntropyLoss() __lowercase= loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase= BCEWithLogitsLoss() __lowercase= loss_fct(lowerCAmelCase , lowerCAmelCase ) if not return_dict: __lowercase= (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=lowerCAmelCase , logits=lowerCAmelCase , hidden_states=outputs.hidden_states , )
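# Hedged inference sketch for the classification head defined above. The public
# `transformers` names (`MobileNetV1ForImageClassification`, `AutoImageProcessor`)
# are assumed to correspond to the obfuscated classes in this file, and the
# image path is a placeholder.
from PIL import Image

from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

processor = AutoImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224')
model = MobileNetV1ForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224')

image = Image.open('cat.png')  # hypothetical local image
inputs = processor(images=image, return_tensors='pt')
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])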
304
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class A ( A_ ): UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor class A ( A_ , A_ ): UpperCamelCase_ : Dict =1 @register_to_config def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ): # standard deviation of the initial noise distribution __lowercase= sigma_max # setable values __lowercase= None self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): return sample def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps __lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ): __lowercase= sigma_min if sigma_min is not None else self.config.sigma_min __lowercase= sigma_max if sigma_max is not None else self.config.sigma_max __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCAmelCase , lowerCAmelCase ) __lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) ) __lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) __lowercase= timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) __lowercase= (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __lowercase= timesteps.to(self.discrete_sigmas.device ) __lowercase= self.discrete_sigmas[timesteps].to(sample.device ) __lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device ) __lowercase= torch.zeros_like(lowerCAmelCase ) __lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __lowercase= diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): __lowercase= diffusion.unsqueeze(-1 ) __lowercase= drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __lowercase= randn_tensor( sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype ) __lowercase= sample - drift # subtract 
because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction __lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr __lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() __lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __lowercase= step_size.flatten() while len(step_size.shape ) < len(sample.shape ): __lowercase= step_size.unsqueeze(-1 ) __lowercase= sample + step_size * model_output __lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowercase= timesteps.to(original_samples.device ) __lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps] __lowercase= ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None] ) __lowercase= noise + original_samples return noisy_samples def __len__(self ): return self.config.num_train_timesteps
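# Hedged predictor-corrector sampling sketch for the scheduler above. The
# obfuscated step methods are assumed to correspond to `step_pred`
# (Euler-Maruyama predictor) and `step_correct` (Langevin corrector) in the
# released `diffusers` `ScoreSdeVeScheduler`; `model` and `shape` are
# placeholders.
import torch

scheduler.set_timesteps(num_inference_steps)
scheduler.set_sigmas(num_inference_steps)

sample = torch.randn(shape) * scheduler.config.sigma_max
for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
    # corrector: a few Langevin MCMC steps at the current noise level
    for _ in range(scheduler.config.correct_steps):
        sample = scheduler.step_correct(model(sample, sigma_t).sample, sample).prev_sample
    # predictor: one reverse-SDE step toward the previous noise level
    out = scheduler.step_pred(model(sample, sigma_t).sample, t, sample)
    sample = out.prev_sample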
304
1
def _lowerCamelCase( lowercase__ ) -> list:
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError('The given input must be positive' )

    # get the generated string sequence
    __lowercase= gray_code_sequence_string(lowercase__ )
    #
    # convert them to integers
    for i in range(len(lowercase__ ) ):
        __lowercase= int(sequence[i] , 2 )

    return sequence


def _lowerCamelCase( lowercase__ ) -> list:
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    __lowercase= 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    __lowercase= gray_code_sequence_string(bit_count - 1 )

    __lowercase= []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        __lowercase= '0' + smaller_sequence[i]
        sequence.append(lowercase__ )

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        __lowercase= '1' + smaller_sequence[i]
        sequence.append(lowercase__ )

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
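# Worked example for the generators above. With bit_count = 3 the string helper
# yields ['000', '001', '011', '010', '110', '111', '101', '100'], which the
# top-level function converts to integers:
#
#   gray_code(3) -> [0, 1, 3, 2, 6, 7, 5, 4]
#
# (`gray_code` is the assumed de-obfuscated name of the first function; each
# adjacent pair of values differs in exactly one bit.)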
304
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCAmelCase = False class A ( unittest.TestCase ): pass @nightly @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase ) __lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= generator.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _A (self ): __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 'cyberpunk 2077' __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) __lowercase= torch.manual_seed(0 ) __lowercase= pipe.dual_guided( prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= 'A painting of a squirrel eating a burger ' __lowercase= torch.manual_seed(0 ) __lowercase= pipe.text_to_image( prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 __lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
304
1
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask'''] def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ): super().__init__( feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= spectrogram_length __lowercase= num_channels __lowercase= patch_size __lowercase= feature_size // self.patch_size[1] __lowercase= n_fft __lowercase= sampling_rate // hop_length_to_sampling_rate __lowercase= sampling_rate __lowercase= padding_value __lowercase= mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T def _A (self , lowerCAmelCase ): __lowercase= spectrogram( lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , ) __lowercase= log_spec[:, :-1] __lowercase= log_spec - 20.0 __lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __lowercase= is_batched_numpy or ( isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ): __lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa ) elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowercase= raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowercase= [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __lowercase= [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , lowerCAmelCase ): __lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __lowercase= max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __lowercase= [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __lowercase= np.array(lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding __lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __lowercase= padded_audio_features * self.padding_value for i in range(len(lowerCAmelCase ) ): __lowercase= audio_features[i] __lowercase= feature # return as BatchFeature if return_attention_mask: __lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: __lowercase= {'audio_values': padded_audio_features} __lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase ) return encoded_inputs
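# Hedged usage sketch. The defaults above (44.1 kHz audio, 128 mel bins,
# 16x16 patches) match the TVLT feature extractor in `transformers`; the
# `TvltFeatureExtractor` name is an assumption since the class above is
# obfuscated, and the audio is random placeholder data.
import numpy as np

from transformers import TvltFeatureExtractor

feature_extractor = TvltFeatureExtractor()
audio = [np.random.randn(44_100).astype(np.float32)]  # one second of fake audio
batch = feature_extractor(audio, sampling_rate=44_100, return_tensors='np')
print(batch['audio_values'].shape)  # (batch, 1, padded_time, 128)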
304
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
304
1
def _lowerCamelCase( lowercase__ ) -> list:
    '''simple docstring'''

    def merge(lowercase__ , lowercase__ ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right

        return list(_merge() )

    if len(lowercase__ ) <= 1:
        return collection
    __lowercase= len(lowercase__ ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
    lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
    print(*merge_sort(unsorted), sep=''',''')
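# Quick sanity check for the merge sort above (the `__main__` block refers to
# it as `merge_sort`, the assumed de-obfuscated name):
#
#   merge_sort([5, 2, 4, 1]) -> [1, 2, 4, 5]
#   merge_sort([])           -> []
#
# The merge step is O(n) per level and the recursion depth is O(log n), giving
# the usual O(n log n) running time.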
304
import math
from datetime import datetime, timedelta


def _lowerCamelCase( lowercase__ ) -> datetime:
    '''simple docstring'''
    __lowercase= year % 1_9
    __lowercase= year % 4
    __lowercase= year % 7
    __lowercase= math.floor(year / 1_0_0 )
    __lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    __lowercase= leap_day_inhibits / 4
    __lowercase= (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    __lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    __lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0

    # PHM -> Paschal Full Moon
    __lowercase= (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_8 )
    else:
        return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


if __name__ == "__main__":
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
        print(F'Easter in {year} {tense} {gauss_easter(year)}')
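# Worked example for the Gauss computus above, using year = 2023:
#   metonic_cycle = 2023 % 19 = 9        julian_leap_year = 2023 % 4 = 3
#   non_leap_year = 2023 % 7 = 0         leap_day_inhibits = 20
#   lunar_orbit_correction = 6           leap_day_reinstall_number = 5.0
#   secular_moon_shift = (15 - 6 + 20 - 5) % 30 = 24
#   century_starting_point = (4 + 20 - 5) % 7 = 5
#   days_to_add = (19 * 9 + 24) % 30 = 15
#   days_from_phm_to_sunday = (6 + 0 + 90 + 5) % 7 = 3
# March 22 + 18 days = April 9, matching the actual Easter Sunday of 2023.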
304
1
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=3_0 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=3_2 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=1_0 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= patch_size __lowercase= num_channels __lowercase= is_training __lowercase= use_labels __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase= (image_size // patch_size) ** 2 __lowercase= num_patches + 1 def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= TFViTModel(config=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , training=lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
__lowercase= self.image_size // 2 __lowercase= pixel_values[:, :, :image_size, :image_size] __lowercase= model(lowerCAmelCase , interpolate_pos_encoding=lowerCAmelCase , training=lowerCAmelCase ) __lowercase= (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.type_sequence_label_size __lowercase= TFViTForImageClassification(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. __lowercase= self.image_size // 2 __lowercase= pixel_values[:, :, :image_size, :image_size] __lowercase= model(lowerCAmelCase , interpolate_pos_encoding=lowerCAmelCase , training=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase= 1 __lowercase= TFViTForImageClassification(lowerCAmelCase ) __lowercase= floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'pixel_values': pixel_values} return config, inputs_dict @require_tf class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : int =(TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCamelCase_ : Union[str, Any] =( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCamelCase_ : Tuple =False UpperCamelCase_ : Optional[int] =False UpperCamelCase_ : Any =False def _A (self ): __lowercase= TFViTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 ) def _A (self ): self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def _A (self ): pass @unittest.skip(reason='ViT does not use inputs_embeds' ) def _A (self ): pass def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __lowercase= model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase , tf.keras.layers.Layer ) ) def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) @slow def _A (self ): __lowercase= 
TFViTModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(lowerCAmelCase ) def _lowerCamelCase( ) -> Optional[Any]: '''simple docstring''' __lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def _A (self ): return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def _A (self ): __lowercase= TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=lowerCAmelCase , return_tensors='tf' ) # forward pass __lowercase= model(**lowerCAmelCase ) # verify the logits __lowercase= tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase ) __lowercase= tf.constant([-0.27_44, 0.82_15, -0.08_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 )
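# Hedged standalone-inference sketch distilled from the integration test above;
# it only uses calls and checkpoints that appear in that test, plus the
# `prepare_img` helper defined alongside it.
import tensorflow as tf

from transformers import TFViTForImageClassification, ViTImageProcessor

model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')

inputs = processor(images=prepare_img(), return_tensors='tf')
logits = model(**inputs).logits  # shape (1, 1000)
print(int(tf.argmax(logits, axis=-1)[0]))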
304
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''blenderbot-small''' UpperCamelCase_ : Optional[Any] =['''past_key_values'''] UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ): __lowercase= vocab_size __lowercase= max_position_embeddings __lowercase= d_model __lowercase= encoder_ffn_dim __lowercase= encoder_layers __lowercase= encoder_attention_heads __lowercase= decoder_ffn_dim __lowercase= decoder_layers __lowercase= decoder_attention_heads __lowercase= dropout __lowercase= attention_dropout __lowercase= activation_dropout __lowercase= activation_function __lowercase= init_std __lowercase= encoder_layerdrop __lowercase= decoder_layerdrop __lowercase= use_cache __lowercase= encoder_layers __lowercase= scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , ) class A ( A_ ): @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase= {0: 'batch'} __lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __lowercase= {0: 'batch', 1: 'decoder_sequence'} __lowercase= {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
__lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} else: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super().outputs else: __lowercase= super(lowerCAmelCase , self ).outputs if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Generate decoder inputs __lowercase= seq_length if not self.use_past else 1 __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __lowercase= dict(**lowerCAmelCase , **lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape __lowercase= common_inputs['decoder_input_ids'].shape[1] __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= decoder_seq_length + 3 __lowercase= ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase= torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 ) __lowercase= [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase, __lowercase= self.num_layers __lowercase= min(lowerCAmelCase , lowerCAmelCase ) __lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers __lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), ) ) # TODO: test this. 
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowerCAmelCase , lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowercase= seqlen + 2 __lowercase, __lowercase= self.num_layers __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= common_inputs['attention_mask'].dtype __lowercase= torch.cat( [common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 ) __lowercase= [ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase ) ] return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase ) __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: __lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) elif self.task == "causal-lm": __lowercase= self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) else: __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: __lowercase= 
super(lowerCAmelCase , self )._flatten_past_key_values_( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
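# Hedged export sketch. An OnnxSeqaSeqConfigWithPast subclass like the one above
# plugs into the `transformers.onnx` export entry point; the exact CLI below
# reflects that (now legacy) interface and is an assumption for this checkpoint:
#
#   python -m transformers.onnx --model=facebook/blenderbot_small-90M \
#       --feature=seq2seq-lm onnx/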
304
1
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A : @staticmethod def _A (*lowerCAmelCase , **lowerCAmelCase ): pass @is_pipeline_test @require_torch @require_vision class A ( unittest.TestCase ): UpperCamelCase_ : str =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' ) __lowercase= [ { 'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'question': 'How many cats are there?', }, { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'question': 'How many cats are there?', }, ] return vqa_pipeline, examples def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= vqa_pipeline(lowerCAmelCase , top_k=1 ) self.assertEqual( lowerCAmelCase , [ [{'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}], [{'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}], ] , ) @require_torch def _A (self ): __lowercase= pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' ) __lowercase= './tests/fixtures/tests_samples/COCO/000000039769.png' __lowercase= 'How many cats are there?' __lowercase= vqa_pipeline(image=lowerCAmelCase , question='How many cats are there?' , top_k=2 ) self.assertEqual( lowerCAmelCase , [{'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}, {'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}] ) __lowercase= vqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( lowerCAmelCase , [{'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}, {'score': ANY(lowerCAmelCase ), 'answer': ANY(lowerCAmelCase )}] ) @slow @require_torch def _A (self ): __lowercase= pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' ) __lowercase= './tests/fixtures/tests_samples/COCO/000000039769.png' __lowercase= 'How many cats are there?' __lowercase= vqa_pipeline(image=lowerCAmelCase , question=lowerCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase , decimals=4 ) , [{'score': 0.87_99, 'answer': '2'}, {'score': 0.2_96, 'answer': '1'}] ) __lowercase= vqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase , decimals=4 ) , [{'score': 0.87_99, 'answer': '2'}, {'score': 0.2_96, 'answer': '1'}] ) __lowercase= vqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase , decimals=4 ) , [[{'score': 0.87_99, 'answer': '2'}, {'score': 0.2_96, 'answer': '1'}]] * 2 , ) @require_tf @unittest.skip('Visual question answering not implemented in TF' ) def _A (self ): pass
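# Hedged usage sketch matching the slow test above (same checkpoint, same call
# pattern); the image path points at the repository test fixture.
from transformers import pipeline

vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
preds = vqa(
    image='./tests/fixtures/tests_samples/COCO/000000039769.png',
    question='How many cats are there?',
    top_k=2,
)
# -> [{'score': ~0.88, 'answer': '2'}, {'score': ~0.30, 'answer': '1'}]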
304
from math import factorial, radians


def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
    '''simple docstring'''
    __lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    __lowercase= radians(lowercase__ )

    __lowercase= angle_in_radians
    __lowercase= 3
    __lowercase= -1

    for _ in range(lowercase__ ):
        result += (b * (angle_in_radians**a)) / factorial(lowercase__ )

        __lowercase= -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(lowercase__ , lowercase__ )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
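# Sanity check for the Maclaurin expansion above (`maclaurin_sin` is the
# assumed de-obfuscated name of the function):
#
#   maclaurin_sin(30) -> 0.5   (sine of 30 degrees)
#   maclaurin_sin(90) -> 1.0
#
# Each loop iteration adds one odd-power term b * x**a / a!, with the sign `b`
# flipping between terms, so the accuracy parameter is the number of terms kept.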
304
1
lowerCAmelCase = [
    (1_0_0_0, '''M'''),
    (9_0_0, '''CM'''),
    (5_0_0, '''D'''),
    (4_0_0, '''CD'''),
    (1_0_0, '''C'''),
    (9_0, '''XC'''),
    (5_0, '''L'''),
    (4_0, '''XL'''),
    (1_0, '''X'''),
    (9, '''IX'''),
    (5, '''V'''),
    (4, '''IV'''),
    (1, '''I'''),
]


def _lowerCamelCase( lowercase__ ) -> int:
    '''simple docstring'''
    __lowercase= {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
    __lowercase= 0
    __lowercase= 0
    while place < len(lowercase__ ):
        if (place + 1 < len(lowercase__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def _lowerCamelCase( lowercase__ ) -> str:
    '''simple docstring'''
    __lowercase= []
    for arabic, roman in ROMAN:
        ((__lowercase), (__lowercase))= divmod(lowercase__ , lowercase__ )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(lowercase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
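# Round-trip example for the two converters above (`roman_to_int` and
# `int_to_roman` are assumed de-obfuscated names):
#
#   roman_to_int('MMXXIV') -> 2024
#   int_to_roman(2024)     -> 'MMXXIV'
#
# Subtractive pairs (IV, IX, XL, ...) are handled by the look-ahead comparison
# in the while-loop and by the ordered ROMAN table, respectively.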
304
lowerCAmelCase = [ 9_9_9, 8_0_0, 7_9_9, 6_0_0, 5_9_9, 5_0_0, 4_0_0, 3_9_9, 3_7_7, 3_5_5, 3_3_3, 3_1_1, 2_8_8, 2_6_6, 2_4_4, 2_2_2, 2_0_0, 1_9_9, 1_7_7, 1_5_5, 1_3_3, 1_1_1, 8_8, 6_6, 4_4, 2_2, 0, ] lowerCAmelCase = [ 9_9_9, 9_7_6, 9_5_2, 9_2_8, 9_0_5, 8_8_2, 8_5_8, 8_5_7, 8_1_0, 7_6_2, 7_1_5, 7_1_4, 5_7_2, 4_2_9, 4_2_8, 2_8_6, 2_8_5, 2_3_8, 1_9_0, 1_4_3, 1_4_2, 1_1_8, 9_5, 7_1, 4_7, 2_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_8_8, 9_7_7, 9_6_6, 9_5_5, 9_4_4, 9_3_3, 9_2_2, 9_1_1, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_5_0, 3_0_0, 2_9_9, 2_6_6, 2_3_3, 2_0_0, 1_9_9, 1_7_9, 1_5_9, 1_4_0, 1_2_0, 1_0_0, 9_9, 8_8, 7_7, 6_6, 5_5, 4_4, 3_3, 2_2, 1_1, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_5, 9_9_2, 9_8_9, 9_8_5, 9_8_1, 9_7_8, 9_7_5, 9_7_1, 9_6_7, 9_6_4, 9_6_1, 9_5_7, 9_5_6, 9_5_1, 9_4_7, 9_4_2, 9_3_7, 9_3_3, 9_2_8, 9_2_3, 9_1_9, 9_1_4, 9_1_3, 9_0_8, 9_0_3, 8_9_7, 8_9_2, 8_8_7, 8_8_1, 8_7_6, 8_7_1, 8_7_0, 8_6_4, 8_5_8, 8_5_2, 8_4_6, 8_4_0, 8_3_4, 8_2_8, 8_2_7, 8_2_0, 8_1_3, 8_0_6, 7_9_9, 7_9_2, 7_8_5, 7_8_4, 7_7_7, 7_7_0, 7_6_3, 7_5_6, 7_4_9, 7_4_2, 7_4_1, 7_3_3, 7_2_4, 7_1_6, 7_0_7, 6_9_9, 6_9_8, 6_8_8, 6_7_7, 6_6_6, 6_5_6, 6_5_5, 6_4_5, 6_3_4, 6_2_3, 6_1_3, 6_1_2, 5_9_8, 5_8_4, 5_7_0, 5_6_9, 5_5_5, 5_4_1, 5_2_7, 5_2_6, 5_0_5, 4_8_4, 4_8_3, 4_6_2, 4_4_0, 4_3_9, 3_9_6, 3_9_5, 3_5_2, 3_5_1, 3_0_8, 3_0_7, 2_6_4, 2_6_3, 2_2_0, 2_1_9, 1_7_6, 1_3_2, 8_8, 4_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_7, 9_9_5, 9_9_2, 9_9_0, 9_8_8, 9_8_6, 9_8_4, 9_8_1, 9_7_9, 9_7_7, 9_7_5, 9_7_2, 9_7_0, 9_6_8, 9_6_6, 9_6_4, 9_6_1, 9_5_9, 9_5_7, 9_5_6, 9_5_4, 9_5_1, 9_4_9, 9_4_6, 9_4_4, 9_4_1, 9_3_9, 9_3_6, 9_3_4, 9_3_1, 9_2_9, 9_2_6, 9_2_4, 9_2_1, 9_1_9, 9_1_6, 9_1_4, 9_1_3, 9_1_0, 9_0_7, 9_0_5, 9_0_2, 8_9_9, 8_9_6, 8_9_3, 8_9_1, 8_8_8, 8_8_5, 8_8_2, 8_7_9, 8_7_7, 8_7_4, 8_7_1, 8_7_0, 8_6_7, 8_6_4, 8_6_1, 8_5_8, 8_5_5, 8_5_2, 8_4_9, 8_4_6, 8_4_3, 8_4_0, 8_3_7, 8_3_4, 8_3_1, 8_2_8, 8_2_7, 8_2_4, 8_2_1, 8_1_7, 8_1_4, 8_1_1, 8_0_8, 8_0_4, 8_0_1, 7_9_8, 7_9_5, 7_9_1, 7_8_8, 7_8_5, 7_8_4, 7_8_0, 7_7_7, 7_7_4, 7_7_0, 7_6_6, 7_6_3, 7_6_0, 7_5_6, 7_5_2, 7_4_9, 7_4_6, 7_4_2, 7_4_1, 7_3_7, 7_3_3, 7_3_0, 7_2_6, 7_2_2, 7_1_8, 7_1_4, 7_1_0, 7_0_7, 7_0_3, 6_9_9, 6_9_8, 6_9_4, 6_9_0, 6_8_5, 6_8_1, 6_7_7, 6_7_3, 6_6_9, 6_6_4, 6_6_0, 6_5_6, 6_5_5, 6_5_0, 6_4_6, 6_4_1, 6_3_6, 6_3_2, 6_2_7, 6_2_2, 6_1_8, 6_1_3, 6_1_2, 6_0_7, 6_0_2, 5_9_6, 5_9_1, 5_8_6, 5_8_0, 5_7_5, 5_7_0, 5_6_9, 5_6_3, 5_5_7, 5_5_1, 5_4_5, 5_3_9, 5_3_3, 5_2_7, 5_2_6, 5_1_9, 5_1_2, 5_0_5, 4_9_8, 4_9_1, 4_8_4, 4_8_3, 4_7_4, 4_6_6, 4_5_7, 4_4_9, 4_4_0, 4_3_9, 4_2_8, 4_1_8, 4_0_7, 3_9_6, 3_9_5, 3_8_1, 3_6_6, 3_5_2, 3_5_1, 3_3_0, 3_0_8, 3_0_7, 2_8_6, 2_6_4, 2_6_3, 2_4_2, 2_2_0, 2_1_9, 1_7_6, 1_7_5, 1_3_2, 1_3_1, 8_8, 4_4, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_1, 9_8_2, 9_7_4, 9_6_6, 9_5_8, 9_5_0, 9_4_1, 9_3_3, 9_2_5, 9_1_6, 9_0_8, 9_0_0, 8_9_9, 8_7_4, 8_5_0, 8_2_5, 8_0_0, 7_9_9, 7_0_0, 6_0_0, 5_0_0, 4_0_0, 3_0_0, 2_0_0, 1_0_0, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_2, 9_8_5, 9_7_8, 9_7_1, 9_6_4, 9_5_7, 9_4_9, 9_4_2, 9_3_5, 9_2_8, 9_2_1, 9_1_4, 9_0_7, 9_0_0, 8_9_9, 8_7_9, 8_5_9, 8_4_0, 8_2_0, 8_0_0, 7_9_9, 7_6_6, 7_3_3, 7_0_0, 6_9_9, 6_5_0, 6_0_0, 5_9_9, 5_0_0, 4_9_9, 4_0_0, 3_9_9, 3_0_0, 2_9_9, 2_0_0, 1_9_9, 1_0_0, 9_9, 0, ] lowerCAmelCase = [ 9_9_9, 9_9_6, 9_9_2, 9_8_9, 9_8_5, 9_8_2, 9_7_9, 9_7_5, 9_7_2, 9_6_8, 9_6_5, 9_6_1, 9_5_8, 9_5_5, 9_5_1, 9_4_8, 9_4_4, 9_4_1, 9_3_8, 9_3_4, 9_3_1, 9_2_7, 9_2_4, 9_2_0, 9_1_7, 9_1_4, 9_1_0, 9_0_7, 9_0_3, 9_0_0, 8_9_9, 8_9_1, 8_8_4, 8_7_6, 8_6_9, 8_6_1, 8_5_3, 8_4_6, 
8_3_8, 8_3_0, 8_2_3, 8_1_5, 8_0_8, 8_0_0, 7_9_9, 7_8_8, 7_7_7, 7_6_6, 7_5_5, 7_4_4, 7_3_3, 7_2_2, 7_1_1, 7_0_0, 6_9_9, 6_8_8, 6_7_7, 6_6_6, 6_5_5, 6_4_4, 6_3_3, 6_2_2, 6_1_1, 6_0_0, 5_9_9, 5_8_5, 5_7_1, 5_5_7, 5_4_2, 5_2_8, 5_1_4, 5_0_0, 4_9_9, 4_8_5, 4_7_1, 4_5_7, 4_4_2, 4_2_8, 4_1_4, 4_0_0, 3_9_9, 3_7_9, 3_5_9, 3_4_0, 3_2_0, 3_0_0, 2_9_9, 2_7_9, 2_5_9, 2_4_0, 2_2_0, 2_0_0, 1_9_9, 1_6_6, 1_3_3, 1_0_0, 9_9, 6_6, 3_3, 0, ]
304
1
from collections.abc import Callable

import numpy as np


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> np.ndarray:
    '''simple docstring'''
    __lowercase= int(np.ceil((x_end - xa) / step_size ) )
    __lowercase= np.zeros((n + 1,) )
    __lowercase= ya
    __lowercase= xa

    for k in range(lowercase__ ):
        __lowercase= y[k] + step_size * ode_func(lowercase__ , y[k] )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
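The cell above is the explicit (forward) Euler integrator with its identifiers mangled. A minimal usage sketch, where the readable names (explicit_euler, ode_func) are stand-ins assumed for clarity rather than taken from the row:

    import numpy as np

    def explicit_euler(ode_func, y0, x0, step_size, x_end):
        # same recurrence as above: y[k + 1] = y[k] + h * f(x[k], y[k])
        n = int(np.ceil((x_end - x0) / step_size))
        y = np.zeros(n + 1)
        y[0] = y0
        x = x0
        for k in range(n):
            y[k + 1] = y[k] + step_size * ode_func(x, y[k])
            x += step_size
        return y

    # dy/dx = y with y(0) = 1 has the exact solution e**x, so the endpoint
    # estimate should approach e ~ 2.71828 as the step size shrinks.
    print(explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])  # ~2.7048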
304
from __future__ import annotations

import numpy as np


def _lowerCamelCase( lowercase__ ) -> str:
    '''simple docstring'''
    return np.maximum(0 , lowercase__ )


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
304
1
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
    '''simple docstring'''
    __lowercase= 2**power
    __lowercase= str(lowercase__ )
    __lowercase= list(lowercase__ )
    __lowercase= 0
    for i in list_num:
        sum_of_num += int(lowercase__ )
    return sum_of_num


if __name__ == "__main__":
    lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    lowerCAmelCase = solution(power)
    print('''Sum of the digits is: ''', result)
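The function above computes the digit sum of 2**power (Project Euler problem 16 when power = 1000). A one-line sketch with an assumed readable name, plus a case small enough to check by hand:

    def digit_sum_of_power(power: int = 1000) -> int:
        return sum(int(digit) for digit in str(2**power))

    assert digit_sum_of_power(15) == 26  # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26
    print(digit_sum_of_power(1000))      # 1366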
304
def _lowerCamelCase( lowercase__ = 1_0_0_0 ) -> int:
    '''simple docstring'''
    __lowercase= 2**power
    __lowercase= str(lowercase__ )
    __lowercase= list(lowercase__ )
    __lowercase= 0
    for i in list_num:
        sum_of_num += int(lowercase__ )
    return sum_of_num


if __name__ == "__main__":
    lowerCAmelCase = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    lowerCAmelCase = solution(power)
    print('''Sum of the digits is: ''', result)
304
1
from __future__ import annotations

import numpy as np


def _lowerCamelCase( lowercase__ ) -> tuple[np.ndarray, np.ndarray]:
    '''simple docstring'''
    __lowercase, __lowercase= np.shape(lowercase__ )
    if rows != columns:
        __lowercase= (
            '\'table\' has to be of square shaped array but got a '
            F'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(lowercase__ )

    __lowercase= np.zeros((rows, columns) )
    __lowercase= np.zeros((rows, columns) )
    for i in range(lowercase__ ):
        for j in range(lowercase__ ):
            __lowercase= sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) )
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists' )
            __lowercase= (table[i][j] - total) / upper[j][j]
        __lowercase= 1
        for j in range(lowercase__ , lowercase__ ):
            __lowercase= sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) )
            __lowercase= table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
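A quick check of the Doolittle-style factorization above (unit diagonal on the lower factor, no pivoting, so it assumes every leading minor of the input is nonsingular); `_lowerCamelCase` below is the row's own mangled name for the function, and the test matrix is illustrative:

    import numpy as np

    a = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = _lowerCamelCase(a)
    assert np.allclose(lower @ upper, a)     # L @ U reconstructs the input
    assert np.allclose(np.diag(lower), 1.0)  # Doolittle convention: unit diagonal on L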
304
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int: '''simple docstring''' __lowercase= {} if train_file is not None: __lowercase= [train_file] if eval_file is not None: __lowercase= [eval_file] if test_file is not None: __lowercase= [test_file] __lowercase= datasets.load_dataset('csv' , data_files=lowercase__ ) __lowercase= list(ds[list(files.keys() )[0]].features.keys() ) __lowercase= features_name.pop(lowercase__ ) __lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) ) __lowercase= {label: i for i, label in enumerate(lowercase__ )} __lowercase= tokenizer.model_input_names __lowercase= {} if len(lowercase__ ) == 1: for k in files.keys(): __lowercase= ds[k].map( lambda lowercase__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , ) elif len(lowercase__ ) == 2: for k in files.keys(): __lowercase= ds[k].map( lambda lowercase__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCAmelCase = logging.getLogger(__name__) @dataclass class A : UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains 
the label'''} ) UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} ) UpperCamelCase_ : int =field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def _lowerCamelCase( ) -> Optional[Any]: '''simple docstring''' __lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info( F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' F'16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowercase= AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowercase, __lowercase, __lowercase, __lowercase= get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __lowercase= AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __lowercase= TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , ) def compute_metrics(lowercase__ ) -> Dict: __lowercase= np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __lowercase= TFTrainer( model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase= {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowercase= trainer.evaluate() __lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' ) with open(lowercase__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) results.update(lowercase__ ) return results if __name__ == "__main__": main()
304
1
from math import isqrt, loga


def _lowerCamelCase( lowercase__ ) -> list[int]:
    '''simple docstring'''
    __lowercase= [True] * max_number

    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , lowercase__ , lowercase__ ):
                __lowercase= False

    return [i for i in range(2 , lowercase__ ) if is_prime[i]]


def _lowerCamelCase( lowercase__ = 8_0_0_8_0_0 , lowercase__ = 8_0_0_8_0_0 ) -> int:
    '''simple docstring'''
    __lowercase= degree * loga(lowercase__ )
    __lowercase= int(lowercase__ )

    __lowercase= calculate_prime_numbers(lowercase__ )

    __lowercase= 0
    __lowercase= 0
    __lowercase= len(lowercase__ ) - 1
    while left < right:
        while (
            prime_numbers[right] * loga(prime_numbers[left] )
            + prime_numbers[left] * loga(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(F'{solution() = }')
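The inner while-loop above compares logarithms instead of the integers themselves: p**q * q**p <= base**degree holds exactly when q*log2(p) + p*log2(q) <= degree*log2(base), which keeps every quantity a small float rather than a multi-million-digit integer. A sketch of the equivalence on deliberately small values (chosen here for illustration):

    from math import log2

    base, degree = 100, 2  # threshold is 100**2 = 10000
    for p, q in [(2, 3), (7, 11)]:
        direct = p**q * q**p <= base**degree
        via_logs = q * log2(p) + p * log2(q) <= degree * log2(base)
        assert direct == via_logs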
304
import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A ( A_ ): def _A (self ): __lowercase= self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= patch_sizes __lowercase= patch_stride __lowercase= patch_padding __lowercase= is_training __lowercase= use_labels __lowercase= num_labels __lowercase= num_channels __lowercase= embed_dim __lowercase= num_heads __lowercase= stride_kv __lowercase= depth __lowercase= cls_token __lowercase= attention_drop_rate __lowercase= initializer_range __lowercase= layer_norm_eps def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.num_labels ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= CvtModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= (self.image_size, self.image_size) __lowercase, __lowercase= image_size[0], image_size[1] for i in range(len(self.depth ) ): __lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= CvtForImageClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else () UpperCamelCase_ : List[str] =( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) UpperCamelCase_ : str =False UpperCamelCase_ : List[Any] =False UpperCamelCase_ : Any =False UpperCamelCase_ : Union[str, Any] =False UpperCamelCase_ : Tuple =False def _A (self ): __lowercase= CvtModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 ) def _A (self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _A (self ): return @unittest.skip(reason='Cvt does not output attentions' ) def _A (self ): pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def _A (self ): pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def _A (self ): pass def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def _A (self ): def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) ) __lowercase= outputs.hidden_states __lowercase= len(self.model_tester.depth ) self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _A (self ): pass @slow def _A (self ): for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= CvtModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' __lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def _A (self ): return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _A (self ): __lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase= model(**lowerCAmelCase ) # verify the logits __lowercase= torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase ) __lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
304
1
def _lowerCamelCase( lowercase__ ) -> list[int]:
    '''simple docstring'''
    __lowercase= len(lowercase__ )
    for i in range(lowercase__ ):
        for j in range(i + 1 , lowercase__ ):
            if numbers[j] < numbers[i]:
                __lowercase, __lowercase= numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
    lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
    print(exchange_sort(unsorted))
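Exchange sort compares each element with every later element and swaps on inversion, so it sorts in place in O(n^2) comparisons regardless of input order. A short demo calling the function by the row's mangled name:

    print(_lowerCamelCase([5, -3, 12, 0, 2]))  # [-3, 0, 2, 5, 12]
    print(_lowerCamelCase([]))                 # [] -- the empty list is a no-op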
304
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


lowerCAmelCase = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase = [
        '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MraForMaskedLM''',
        '''MraForMultipleChoice''',
        '''MraForQuestionAnswering''',
        '''MraForSequenceClassification''',
        '''MraForTokenClassification''',
        '''MraLayer''',
        '''MraModel''',
        '''MraPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
304
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase = {
    '''configuration_x_clip''': [
        '''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XCLIPConfig''',
        '''XCLIPTextConfig''',
        '''XCLIPVisionConfig''',
    ],
    '''processing_x_clip''': ['''XCLIPProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase = [
        '''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XCLIPModel''',
        '''XCLIPPreTrainedModel''',
        '''XCLIPTextModel''',
        '''XCLIPVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
304
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: ''' lowerCAmelCase = '''======= >>>>>>> ''' lowerCAmelCase = [ '''TextEncoderConfig''', '''ByteTextEncoder''', '''SubwordTextEncoder''', '''encoder_config''', '''maybe_build_from_corpus''', '''manual_dir''', ] lowerCAmelCase = [ # (pattern, replacement) # Order is important here for some replacements (R'''tfds\.core''', R'''datasets'''), (R'''tf\.io\.gfile\.GFile''', R'''open'''), (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''), (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''), (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''), (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''), (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''), (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''), (R'''tfds\.''', R'''datasets.'''), (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''), (R'''self\.builder_config''', R'''self.config'''), ] def _lowerCamelCase( lowercase__ ) -> Optional[int]: '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class A ( A_ ): @staticmethod def _A (lowerCAmelCase ): __lowercase= parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' ) train_parser.set_defaults(func=lowerCAmelCase ) def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= get_logger('datasets-cli/converting' ) __lowercase= tfds_path __lowercase= datasets_directory def _A (self ): if os.path.isdir(self._tfds_path ): __lowercase= os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __lowercase= os.path.dirname(self._tfds_path ) else: raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' 
) __lowercase= os.path.abspath(self._datasets_directory ) self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' ) __lowercase= [] __lowercase= [] __lowercase= {} if os.path.isdir(self._tfds_path ): __lowercase= os.listdir(lowerCAmelCase ) else: __lowercase= [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'Looking at file {f_name}' ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file' ) continue with open(lowerCAmelCase , encoding='utf-8' ) as f: __lowercase= f.readlines() __lowercase= [] __lowercase= False __lowercase= False __lowercase= [] for line in lines: __lowercase= line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __lowercase= 'import datasets\n' elif "import tensorflow" in out_line: # order is important here __lowercase= '' continue elif "from absl import logging" in out_line: __lowercase= 'from datasets import logging\n' elif "getLogger" in out_line: __lowercase= out_line.replace('getLogger' , 'get_logger' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __lowercase= True __lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' ) out_lines.append(lowerCAmelCase ) out_lines.append(lowerCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: __lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) ) __lowercase= 'from . import ' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'Error converting {out_line.strip()}' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __lowercase= True out_lines.append(lowerCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __lowercase= f_name.replace('.py' , '' ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase ) os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) self._logger.info(f'Adding directory {output_dir}' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCAmelCase ) if needs_manual_update: with_manual_update.append(lowerCAmelCase ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.writelines(lowerCAmelCase ) self._logger.info(f'Converted in {output_file}' ) for utils_file in utils_files: try: __lowercase= os.path.basename(lowerCAmelCase ) __lowercase= imports_to_builder_map[f_name.replace('.py' , '' )] self._logger.info(f'Moving {dest_folder} to {utils_file}' ) shutil.copy(lowerCAmelCase , lowerCAmelCase ) except KeyError: self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' 
) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
304
1
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCAmelCase = '''0.12''' # assumed parallelism: 8 @require_flax @is_staging_test class A ( unittest.TestCase ): @classmethod def _A (cls ): __lowercase= TOKEN HfFolder.save_token(lowerCAmelCase ) @classmethod def _A (cls ): try: delete_repo(token=cls._token , repo_id='test-model-flax' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' ) except HTTPError: pass def _A (self ): __lowercase= BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) __lowercase= FlaxBertModel(lowerCAmelCase ) model.push_to_hub('test-model-flax' , use_auth_token=self._token ) __lowercase= FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' ) __lowercase= flatten_dict(unfreeze(model.params ) ) __lowercase= flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __lowercase= (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowerCAmelCase , 1E-3 , msg=f'{key} not identical' ) # Reset repo delete_repo(token=self._token , repo_id='test-model-flax' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowerCAmelCase , repo_id='test-model-flax' , push_to_hub=lowerCAmelCase , use_auth_token=self._token ) __lowercase= FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' ) __lowercase= flatten_dict(unfreeze(model.params ) ) __lowercase= flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __lowercase= (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowerCAmelCase , 1E-3 , msg=f'{key} not identical' ) def _A (self ): __lowercase= BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) __lowercase= FlaxBertModel(lowerCAmelCase ) model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token ) __lowercase= FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' ) __lowercase= flatten_dict(unfreeze(model.params ) ) __lowercase= flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __lowercase= (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowerCAmelCase , 1E-3 , msg=f'{key} not identical' ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( lowerCAmelCase , repo_id='valid_org/test-model-flax-org' , push_to_hub=lowerCAmelCase , use_auth_token=self._token ) __lowercase= FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' ) __lowercase= flatten_dict(unfreeze(model.params ) ) __lowercase= flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __lowercase= (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowerCAmelCase , 1E-3 , msg=f'{key} not identical' ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]: '''simple docstring''' __lowercase= True __lowercase= flatten_dict(modela.params ) __lowercase= 
flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: __lowercase= False return models_are_equal @require_flax class A ( unittest.TestCase ): def _A (self ): __lowercase= BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' ) __lowercase= FlaxBertModel(lowerCAmelCase ) __lowercase= 'bert' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowerCAmelCase , lowerCAmelCase ) ) with self.assertRaises(lowerCAmelCase ): __lowercase= FlaxBertModel.from_pretrained(lowerCAmelCase ) __lowercase= FlaxBertModel.from_pretrained(lowerCAmelCase , subfolder=lowerCAmelCase ) self.assertTrue(check_models_equal(lowerCAmelCase , lowerCAmelCase ) ) def _A (self ): __lowercase= BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' ) __lowercase= FlaxBertModel(lowerCAmelCase ) __lowercase= 'bert' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowerCAmelCase , lowerCAmelCase ) , max_shard_size='10KB' ) with self.assertRaises(lowerCAmelCase ): __lowercase= FlaxBertModel.from_pretrained(lowerCAmelCase ) __lowercase= FlaxBertModel.from_pretrained(lowerCAmelCase , subfolder=lowerCAmelCase ) self.assertTrue(check_models_equal(lowerCAmelCase , lowerCAmelCase ) ) def _A (self ): __lowercase= 'bert' __lowercase= 'hf-internal-testing/tiny-random-bert-subfolder' with self.assertRaises(lowerCAmelCase ): __lowercase= FlaxBertModel.from_pretrained(lowerCAmelCase ) __lowercase= FlaxBertModel.from_pretrained(lowerCAmelCase , subfolder=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def _A (self ): __lowercase= 'bert' __lowercase= 'hf-internal-testing/tiny-random-bert-sharded-subfolder' with self.assertRaises(lowerCAmelCase ): __lowercase= FlaxBertModel.from_pretrained(lowerCAmelCase ) __lowercase= FlaxBertModel.from_pretrained(lowerCAmelCase , subfolder=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase )
304
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


lowerCAmelCase = {
    '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
    '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
    '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
    '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
    '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
    '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
    '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
    '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}


class A ( A_ ):
    UpperCamelCase_ : Optional[int] ='''albert'''

    def __init__(
        self ,
        lowerCAmelCase=3_0_0_0_0 ,
        lowerCAmelCase=1_2_8 ,
        lowerCAmelCase=4_0_9_6 ,
        lowerCAmelCase=1_2 ,
        lowerCAmelCase=1 ,
        lowerCAmelCase=6_4 ,
        lowerCAmelCase=1_6_3_8_4 ,
        lowerCAmelCase=1 ,
        lowerCAmelCase="gelu_new" ,
        lowerCAmelCase=0 ,
        lowerCAmelCase=0 ,
        lowerCAmelCase=5_1_2 ,
        lowerCAmelCase=2 ,
        lowerCAmelCase=0.02 ,
        lowerCAmelCase=1E-12 ,
        lowerCAmelCase=0.1 ,
        lowerCAmelCase="absolute" ,
        lowerCAmelCase=0 ,
        lowerCAmelCase=2 ,
        lowerCAmelCase=3 ,
        **lowerCAmelCase ,
    ):
        super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )

        __lowercase= vocab_size
        __lowercase= embedding_size
        __lowercase= hidden_size
        __lowercase= num_hidden_layers
        __lowercase= num_hidden_groups
        __lowercase= num_attention_heads
        __lowercase= inner_group_num
        __lowercase= hidden_act
        __lowercase= intermediate_size
        __lowercase= hidden_dropout_prob
        __lowercase= attention_probs_dropout_prob
        __lowercase= max_position_embeddings
        __lowercase= type_vocab_size
        __lowercase= initializer_range
        __lowercase= layer_norm_eps
        __lowercase= classifier_dropout_prob
        __lowercase= position_embedding_type


class A ( A_ ):
    @property
    def _A (self ):
        if self.task == "multiple-choice":
            __lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            __lowercase= {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
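With the original (un-mangled) transformers names, this cell defines AlbertConfig and its ONNX export config; instantiating the defaults yields an albert-xxlarge-v2-like configuration. A small usage sketch (the base-sized hyperparameters below are illustrative, not from the row):

    from transformers import AlbertConfig

    config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
    print(config.model_type)      # 'albert'
    print(config.embedding_size)  # 128 -- factorized embeddings are ALBERT's signature trick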
304
1
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Dict:
    '''simple docstring'''
    __lowercase= AlbertConfig.from_json_file(lowercase__ )
    print(F'Building PyTorch model from configuration: {config}' )
    __lowercase= AlbertForPreTraining(lowercase__ )

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(lowercase__ , lowercase__ , lowercase__ )

    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , lowercase__ )


if __name__ == "__main__":
    lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--albert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained ALBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCAmelCase = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
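A hypothetical command line for the conversion script above (the script name and all paths are placeholders, not taken from the row):

    # python convert_albert_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #     --albert_config_file ./albert_base/albert_config.json \
    #     --pytorch_dump_path ./albert_base/pytorch_model.bin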
304
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' __lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' ) __lowercase= transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) __lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ ) return image def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' if "visual_encoder" in key: __lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ ) if "blocks" in key: __lowercase= re.sub(R'blocks' , 'layers' , lowercase__ ) if "attn" in key: __lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ ) if "norm1" in key: __lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ ) if "norm2" in key: __lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ ) if "encoder.norm" in key: __lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ ) if "encoder.patch_embed.proj" in key: __lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ ) if "encoder.pos_embed" in key: __lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ ) if "encoder.cls_token" in key: __lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ ) if "self_attn" in key: __lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ ) return key @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int: '''simple docstring''' if config_path is not None: __lowercase= BlipConfig.from_pretrained(lowercase__ ) else: __lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} ) __lowercase= BlipForConditionalGeneration(lowercase__ ).eval() __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' __lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' ) __lowercase= pt_model.eval() __lowercase= pt_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value hf_model.load_state_dict(lowercase__ ) __lowercase= 3_8_4 __lowercase= load_demo_image(image_size=lowercase__ , device='cpu' ) __lowercase= BertTokenizer.from_pretrained('bert-base-uncased' ) __lowercase= tokenizer(['a picture of'] ).input_ids __lowercase= hf_model.generate(lowercase__ , lowercase__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] __lowercase= hf_model.generate(lowercase__ ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] if 
pytorch_dump_folder_path is not None: hf_model.save_pretrained(lowercase__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' __lowercase= ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) __lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' ) vqa_model.eval() __lowercase= vqa_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value __lowercase= BlipForQuestionAnswering(lowercase__ ) hf_vqa_model.load_state_dict(lowercase__ ) __lowercase= ['How many dogs are in this image?'] __lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids __lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) __lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' __lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' ) itm_model.eval() __lowercase= itm_model.state_dict() for key in modified_state_dict.copy(): __lowercase= modified_state_dict.pop(lowercase__ ) __lowercase= rename_key(lowercase__ ) __lowercase= value __lowercase= BlipForImageTextRetrieval(lowercase__ ) __lowercase= ['A picture of a woman with a dog sitting in a beach'] __lowercase= tokenizer( lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids hf_itm_model.load_state_dict(lowercase__ ) hf_itm_model.eval() __lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ ) __lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') lowerCAmelCase = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
304
1
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class A ( unittest.TestCase ): def __init__(self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=1_8 , lowerCAmelCase=3_0 , lowerCAmelCase=4_0_0 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ): __lowercase= parent __lowercase= batch_size __lowercase= num_channels __lowercase= image_size __lowercase= min_resolution __lowercase= max_resolution __lowercase= do_resize __lowercase= size if size is not None else {'height': 1_8, 'width': 2_0} __lowercase= do_thumbnail __lowercase= do_align_axis __lowercase= do_pad __lowercase= do_normalize __lowercase= image_mean __lowercase= image_std def _A (self ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =DonutImageProcessor if is_vision_available() else None def _A (self ): __lowercase= DonutImageProcessingTester(self ) @property def _A (self ): return self.image_processor_tester.prepare_image_processor_dict() def _A (self ): __lowercase= self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'size' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'do_thumbnail' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'do_align_long_axis' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'do_pad' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(lowerCAmelCase , 'image_std' ) ) def _A (self ): __lowercase= self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 1_8, 'width': 2_0} ) __lowercase= self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} ) # Previous config had dimensions in (width, height) order __lowercase= self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4) ) self.assertEqual(image_processor.size , {'height': 8_4, 'width': 4_2} ) def _A (self ): pass @is_flaky() def _A (self ): # Initialize image_processing __lowercase= self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowercase= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , Image.Image ) # Test not batched input __lowercase= image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __lowercase= 
image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def _A (self ): # Initialize image_processing __lowercase= self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowercase= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , np.ndarray ) # Test not batched input __lowercase= image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __lowercase= image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def _A (self ): # Initialize image_processing __lowercase= self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowercase= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , torch.Tensor ) # Test not batched input __lowercase= image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __lowercase= image_processing(lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
304
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass


lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)


@dataclass
class A :
    UpperCamelCase_ : int
    UpperCamelCase_ : Node | None


class A :
    def __init__(self , lowerCAmelCase ):
        __lowercase= None
        for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
            __lowercase= Node(lowerCAmelCase , self.head )

    def __iter__(self ):
        __lowercase= self.head
        while node:
            yield node.data
            __lowercase= node.next_node

    def __len__(self ):
        return sum(1 for _ in self )

    def __str__(self ):
        return " -> ".join([str(lowerCAmelCase ) for node in self] )


def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
    '''simple docstring'''
    return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCAmelCase = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
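With de-obfuscated names (Node, SortedLinkedList, merge_lists are assumptions about the original identifiers): the constructor inserts each value at the head after sorting in reverse, so iteration yields ascending order, and merging two lists simply rebuilds one from the concatenated values:

    odds = SortedLinkedList([3, 9, -11, 0, 7, 5, 1, -1])
    evens = SortedLinkedList([4, 6, 2, 0, 8, 10, 3, -2])
    print(merge_lists(odds, evens))
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10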
304
1
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def _lowerCamelCase( ) -> Tuple:
    '''simple docstring'''
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    __lowercase= '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching , 'os.path.join' , lowercase__ ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def _lowerCamelCase( ) -> List[Any]:
    '''simple docstring'''
    assert _test_patching.open is open

    __lowercase= '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , 'open' , lowercase__ ):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open


def _lowerCamelCase( ) -> str:
    '''simple docstring'''
    __lowercase= '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching , 'pandas.read_csv' , lowercase__ ):
        pass


def _lowerCamelCase( ) -> List[Any]:
    '''simple docstring'''
    __lowercase= '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , 'len' , lowercase__ ) is None
    with patch_submodule(_test_patching , 'len' , lowercase__ ):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def _lowerCamelCase( ) -> int:
    '''simple docstring'''
    __lowercase= '__test_patch_submodule_start_and_stop_mock__'
    __lowercase= patch_submodule(_test_patching , 'open' , lowercase__ )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def _lowerCamelCase( ) -> Union[str, Any]:
    '''simple docstring'''
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    __lowercase= '__test_patch_submodule_successive_join__'
    __lowercase= '__test_patch_submodule_successive_dirname__'
    __lowercase= '__test_patch_submodule_successive_rename__'
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching , 'os.path.join' , lowercase__ ):
        with patch_submodule(_test_patching , 'os.rename' , lowercase__ ):
            with patch_submodule(_test_patching , 'os.path.dirname' , lowercase__ ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching , 'os.rename' , lowercase__ ):
        with patch_submodule(_test_patching , 'os.path.join' , lowercase__ ):
            with patch_submodule(_test_patching , 'os.path.dirname' , lowercase__ ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def _lowerCamelCase( ) -> int:
    '''simple docstring'''
    __lowercase= '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , lowercase__ ):
        pass
    with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , lowercase__ ):
        pass
304
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """
    Treats the curve as a collection of linear segments and sums the area of the
    trapezium shapes they form with the x axis.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates a small segment of the curve as linear and solves
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
304
1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
304
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_lengths __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= gelu_activation __lowercase= sinusoidal_embeddings __lowercase= causal __lowercase= asm __lowercase= n_langs __lowercase= vocab_size __lowercase= n_special __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= summary_type __lowercase= use_proj __lowercase= scope __lowercase= bos_token_id def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None if self.use_input_lengths: __lowercase= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , 2 ).float() __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A (self ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , langs=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) __lowercase= outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , ) __lowercase= model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , ) ((__lowercase), )= result_with_labels.to_tuple() __lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) ((__lowercase), )= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= XLMForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase 
) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_labels __lowercase= XLMForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.num_choices __lowercase= XLMForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : int =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase_ : Dict =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCamelCase_ : str =( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= XLMModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= min_length + idx + 1 __lowercase= ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ): self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , ) self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCAmelCase ): # adds PAD dummy token __lowercase= min_length + idx + 1 __lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , ) pass @slow def _A (self ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= 
XLMModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president __lowercase= [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
304
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''blenderbot-small''' UpperCamelCase_ : Optional[Any] =['''past_key_values'''] UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ): __lowercase= vocab_size __lowercase= max_position_embeddings __lowercase= d_model __lowercase= encoder_ffn_dim __lowercase= encoder_layers __lowercase= encoder_attention_heads __lowercase= decoder_ffn_dim __lowercase= decoder_layers __lowercase= decoder_attention_heads __lowercase= dropout __lowercase= attention_dropout __lowercase= activation_dropout __lowercase= activation_function __lowercase= init_std __lowercase= encoder_layerdrop __lowercase= decoder_layerdrop __lowercase= use_cache __lowercase= encoder_layers __lowercase= scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , ) class A ( A_ ): @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase= {0: 'batch'} __lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __lowercase= {0: 'batch', 1: 'decoder_sequence'} __lowercase= {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
__lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} else: __lowercase= OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def _A (self ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super().outputs else: __lowercase= super(lowerCAmelCase , self ).outputs if self.use_past: __lowercase, __lowercase= self.num_layers for i in range(lowerCAmelCase ): __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} __lowercase= {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Generate decoder inputs __lowercase= seq_length if not self.use_past else 1 __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) __lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __lowercase= dict(**lowerCAmelCase , **lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape __lowercase= common_inputs['decoder_input_ids'].shape[1] __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= decoder_seq_length + 3 __lowercase= ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase= torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 ) __lowercase= [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase, __lowercase= self.num_layers __lowercase= min(lowerCAmelCase , lowerCAmelCase ) __lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers __lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), ) ) # TODO: test this. 
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowerCAmelCase , lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowercase= seqlen + 2 __lowercase, __lowercase= self.num_layers __lowercase, __lowercase= self.num_attention_heads __lowercase= ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase= common_inputs['attention_mask'].dtype __lowercase= torch.cat( [common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 ) __lowercase= [ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase ) ] return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase ) __lowercase= compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: __lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) elif self.task == "causal-lm": __lowercase= self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) else: __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) return common_inputs def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if self.task in ["default", "seq2seq-lm"]: __lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: __lowercase= 
super(lowerCAmelCase , self )._flatten_past_key_values_( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
304
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
304
1
def is_isogram(string: str) -> bool:
    """
    An isogram is a word in which no letter is repeated.
    Raises ValueError if the string contains non-alphabetic characters.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
304
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """
    Checks if a matrix is Hermitian (equal to its own conjugate transpose).
    """
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """
    Returns the Rayleigh quotient of a Hermitian matrix a and vector v:
    (v* A v) / (v* v)
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
304
1
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowerCAmelCase = { '''sample_size''': 3_2, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1_0_0_0, '''block_out_channels''': [3_2, 6_4], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } lowerCAmelCase = { '''sample_size''': 6_4, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1_0_0_0, '''block_out_channels''': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], '''attention_head_dim''': 6_4, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } lowerCAmelCase = { '''sample_size''': 2_5_6, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], '''attention_head_dim''': 6_4, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } lowerCAmelCase = { '''num_train_timesteps''': 4_0, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } lowerCAmelCase = { '''num_train_timesteps''': 2_0_1, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } lowerCAmelCase = { '''num_train_timesteps''': 1_5_1, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' if isinstance(lowercase__ , lowercase__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=False ) -> Union[str, Any]: '''simple docstring''' __lowercase= checkpoint[F'{old_prefix}.in_layers.0.weight'] __lowercase= checkpoint[F'{old_prefix}.in_layers.0.bias'] __lowercase= checkpoint[F'{old_prefix}.in_layers.2.weight'] __lowercase= checkpoint[F'{old_prefix}.in_layers.2.bias'] __lowercase= checkpoint[F'{old_prefix}.emb_layers.1.weight'] __lowercase= checkpoint[F'{old_prefix}.emb_layers.1.bias'] __lowercase= checkpoint[F'{old_prefix}.out_layers.0.weight'] __lowercase= checkpoint[F'{old_prefix}.out_layers.0.bias'] __lowercase= checkpoint[F'{old_prefix}.out_layers.3.weight'] __lowercase= checkpoint[F'{old_prefix}.out_layers.3.bias'] if has_skip: __lowercase= checkpoint[F'{old_prefix}.skip_connection.weight'] __lowercase= checkpoint[F'{old_prefix}.skip_connection.bias'] return new_checkpoint def _lowerCamelCase( 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ) -> Union[str, Any]: '''simple docstring''' __lowercase, __lowercase, __lowercase= checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 ) __lowercase, __lowercase, __lowercase= checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 ) __lowercase= checkpoint[F'{old_prefix}.norm.weight'] __lowercase= checkpoint[F'{old_prefix}.norm.bias'] __lowercase= weight_q.squeeze(-1 ).squeeze(-1 ) __lowercase= bias_q.squeeze(-1 ).squeeze(-1 ) __lowercase= weight_k.squeeze(-1 ).squeeze(-1 ) __lowercase= bias_k.squeeze(-1 ).squeeze(-1 ) __lowercase= weight_v.squeeze(-1 ).squeeze(-1 ) __lowercase= bias_v.squeeze(-1 ).squeeze(-1 ) __lowercase= ( checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 ) ) __lowercase= checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def _lowerCamelCase( lowercase__ , lowercase__ ) -> int: '''simple docstring''' __lowercase= torch.load(lowercase__ , map_location='cpu' ) __lowercase= {} __lowercase= checkpoint['time_embed.0.weight'] __lowercase= checkpoint['time_embed.0.bias'] __lowercase= checkpoint['time_embed.2.weight'] __lowercase= checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: __lowercase= checkpoint['label_emb.weight'] __lowercase= checkpoint['input_blocks.0.0.weight'] __lowercase= checkpoint['input_blocks.0.0.bias'] __lowercase= unet_config['down_block_types'] __lowercase= unet_config['layers_per_block'] __lowercase= unet_config['attention_head_dim'] __lowercase= unet_config['block_out_channels'] __lowercase= 1 __lowercase= channels_list[0] for i, layer_type in enumerate(lowercase__ ): __lowercase= channels_list[i] __lowercase= current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowercase__ ): __lowercase= F'down_blocks.{i}.resnets.{j}' __lowercase= F'input_blocks.{current_layer}.0' __lowercase= True if j == 0 and downsample_block_has_skip else False __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowercase__ ): __lowercase= F'down_blocks.{i}.resnets.{j}' __lowercase= F'input_blocks.{current_layer}.0' __lowercase= True if j == 0 and downsample_block_has_skip else False __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ ) __lowercase= F'down_blocks.{i}.attentions.{j}' __lowercase= F'input_blocks.{current_layer}.1' __lowercase= convert_attention( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) current_layer += 1 if i != len(lowercase__ ) - 1: __lowercase= F'down_blocks.{i}.downsamplers.0' __lowercase= F'input_blocks.{current_layer}.0' __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) current_layer += 1 __lowercase= current_channels # hardcoded the mid-block for now __lowercase= 'mid_block.resnets.0' __lowercase= 'middle_block.0' __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowercase= 'mid_block.attentions.0' __lowercase= 'middle_block.1' __lowercase= convert_attention(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowercase= 'mid_block.resnets.1' __lowercase= 'middle_block.2' __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowercase= 0 __lowercase= unet_config['up_block_types'] for i, layer_type in enumerate(lowercase__ ): if layer_type 
== "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __lowercase= F'up_blocks.{i}.resnets.{j}' __lowercase= F'output_blocks.{current_layer}.0' __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ ) current_layer += 1 if i != len(lowercase__ ) - 1: __lowercase= F'up_blocks.{i}.upsamplers.0' __lowercase= F'output_blocks.{current_layer-1}.1' __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __lowercase= F'up_blocks.{i}.resnets.{j}' __lowercase= F'output_blocks.{current_layer}.0' __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ ) __lowercase= F'up_blocks.{i}.attentions.{j}' __lowercase= F'output_blocks.{current_layer}.1' __lowercase= convert_attention( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) current_layer += 1 if i != len(lowercase__ ) - 1: __lowercase= F'up_blocks.{i}.upsamplers.0' __lowercase= F'output_blocks.{current_layer-1}.2' __lowercase= convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) __lowercase= checkpoint['out.0.weight'] __lowercase= checkpoint['out.0.bias'] __lowercase= checkpoint['out.2.weight'] __lowercase= checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') lowerCAmelCase = parser.parse_args() lowerCAmelCase = strabool(args.class_cond) lowerCAmelCase = os.path.basename(args.unet_path) print(F'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: lowerCAmelCase = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowerCAmelCase = TEST_UNET_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: lowerCAmelCase = None lowerCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config) lowerCAmelCase = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowerCAmelCase = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: lowerCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') lowerCAmelCase = CMStochasticIterativeScheduler(**scheduler_config) lowerCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
304
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """
    Constructs an audio feature extractor that prepares log mel spectrogram patches for the model.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to prepare one or several audio(s) for the model.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
304
1
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
304
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # the main process pads with one more element to check that padding is applied on the others
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
304
1
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    # load a COCO sample image used across the vision tests
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class A ( A_ ):
    UpperCamelCase_ : torch.FloatTensor
    UpperCamelCase_ : torch.FloatTensor


class A ( A_ , A_ ):
    UpperCamelCase_ : Dict =1

    @register_to_config
    def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
        # standard deviation of the initial noise distribution
        __lowercase= sigma_max

        # setable values
        __lowercase= None
        self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )

    def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
        return sample

    def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
        __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps

        __lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )

    def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
        __lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
        __lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
        __lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase , lowerCAmelCase )

        __lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        __lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
        __lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def _A (self , lowerCAmelCase , lowerCAmelCase ):
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )

        __lowercase= timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        __lowercase= (timestep * (len(self.timesteps ) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        __lowercase= timesteps.to(self.discrete_sigmas.device )

        __lowercase= self.discrete_sigmas[timesteps].to(sample.device )
        __lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
        __lowercase= torch.zeros_like(lowerCAmelCase )
        __lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        __lowercase= diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            __lowercase= diffusion.unsqueeze(-1 )
        __lowercase= drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        __lowercase= randn_tensor(
            sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
        __lowercase= sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        __lowercase= prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        __lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )

        # compute step size from the model_output, the noise, and the snr
        __lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        __lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        __lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        __lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        __lowercase= step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            __lowercase= step_size.unsqueeze(-1 )
        __lowercase= sample + step_size * model_output
        __lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=lowerCAmelCase )

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        __lowercase= timesteps.to(original_samples.device )
        __lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
        __lowercase= (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        __lowercase= noise + original_samples
        return noisy_samples

    def __len__(self ):
        return self.config.num_train_timesteps
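For orientation, here is a hedged sketch of the predictor-corrector loop that normally drives this scheduler. The de-obfuscated public names (`ScoreSdeVeScheduler`, `step_pred`, `step_correct`, `init_noise_sigma`) are the real diffusers API; the `unet` score model and its configuration are assumptions of the sketch, not part of the file above.

import torch
from diffusers import ScoreSdeVeScheduler, UNet2DModel

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=100)
scheduler.set_sigmas(num_inference_steps=100)

unet = UNet2DModel(sample_size=64, in_channels=3, out_channels=3)  # assumed score model
sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    # corrector step(s): Langevin dynamics at the current noise level
    for _ in range(scheduler.config.correct_steps):
        model_output = unet(sample, t).sample
        sample = scheduler.step_correct(model_output, sample).prev_sample
    # predictor step: one step of the reverse-time SDE
    model_output = unet(sample, t).sample
    sample = scheduler.step_pred(model_output, t, sample).prev_sample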
304
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase = {
    '''configuration_bigbird_pegasus''': [
        '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BigBirdPegasusConfig''',
        '''BigBirdPegasusOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase = [
        '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BigBirdPegasusForCausalLM''',
        '''BigBirdPegasusForConditionalGeneration''',
        '''BigBirdPegasusForQuestionAnswering''',
        '''BigBirdPegasusForSequenceClassification''',
        '''BigBirdPegasusModel''',
        '''BigBirdPegasusPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
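Both this file and the X-MOD `__init__` further down follow transformers' lazy-import pattern: submodules are only imported when one of their exported names is first accessed. A simplified, hedged sketch of the idea behind `_LazyModule` (not the library's exact implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch of the lazy-import idea: defer submodule imports
    until an exported name is first accessed, then cache the result."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value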
304
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


lowerCAmelCase = False


class A ( unittest.TestCase ):
    pass


@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
    def _A (self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A (self ):
        __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )

        __lowercase= load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )

        __lowercase= torch.manual_seed(0 )
        __lowercase= pipe.dual_guided(
            prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCAmelCase )
            __lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )

        __lowercase= generator.manual_seed(0 )
        __lowercase= pipe.dual_guided(
            prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images

        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def _A (self ):
        __lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )

        __lowercase= 'cyberpunk 2077'
        __lowercase= load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )

        __lowercase= torch.manual_seed(0 )
        __lowercase= pipe.dual_guided(
            prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images

        __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        __lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

        __lowercase= 'A painting of a squirrel eating a burger '
        __lowercase= torch.manual_seed(0 )
        __lowercase= pipe.text_to_image(
            prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images

        __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        __lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

        __lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
        __lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        __lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
304
1
# Imports
import numpy as np


class A :
    def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
        self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase )

    def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
        if red is not None:
            __lowercase= red
        if green is not None:
            __lowercase= green
        if blue is not None:
            __lowercase= blue
        if red_edge is not None:
            __lowercase= red_edge
        if nir is not None:
            __lowercase= nir
        return True

    def _A (self , lowerCAmelCase="" , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
        self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase )
        __lowercase= {
            'ARVI2': self.arvaa,
            'CCCI': self.ccci,
            'CVI': self.cvi,
            'GLI': self.gli,
            'NDVI': self.ndvi,
            'BNDVI': self.bndvi,
            'redEdgeNDVI': self.red_edge_ndvi,
            'GNDVI': self.gndvi,
            'GBNDVI': self.gbndvi,
            'GRNDVI': self.grndvi,
            'RBNDVI': self.rbndvi,
            'PNDVI': self.pndvi,
            'ATSAVI': self.atsavi,
            'BWDRVI': self.bwdrvi,
            'CIgreen': self.ci_green,
            'CIrededge': self.ci_rededge,
            'CI': self.ci,
            'CTVI': self.ctvi,
            'GDVI': self.gdvi,
            'EVI': self.evi,
            'GEMI': self.gemi,
            'GOSAVI': self.gosavi,
            'GSAVI': self.gsavi,
            'Hue': self.hue,
            'IVI': self.ivi,
            'IPVI': self.ipvi,
            'I': self.i,
            'RVI': self.rvi,
            'MRVI': self.mrvi,
            'MSAVI': self.m_savi,
            'NormG': self.norm_g,
            'NormNIR': self.norm_nir,
            'NormR': self.norm_r,
            'NGRDI': self.ngrdi,
            'RI': self.ri,
            'S': self.s,
            'IF': self._if,
            'DVI': self.dvi,
            'TVI': self.tvi,
            'NDRE': self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print('Index not in the list!' )
            return False

    def _A (self ):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def _A (self ):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def _A (self ):
        return self.nir * (self.red / (self.green**2))

    def _A (self ):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def _A (self ):
        return (self.nir - self.red) / (self.nir + self.red)

    def _A (self ):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def _A (self ):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def _A (self ):
        return (self.nir - self.green) / (self.nir + self.green)

    def _A (self ):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def _A (self ):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def _A (self ):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def _A (self ):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def _A (self , lowerCAmelCase=0.08 , lowerCAmelCase=1.22 , lowerCAmelCase=0.03 ):
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def _A (self ):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def _A (self ):
        return (self.nir / self.green) - 1

    def _A (self ):
        return (self.nir / self.redEdge) - 1

    def _A (self ):
        return (self.red - self.blue) / self.red

    def _A (self ):
        __lowercase= self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))

    def _A (self ):
        return self.nir - self.green

    def _A (self ):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def _A (self ):
        __lowercase= (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)

    def _A (self , lowerCAmelCase=0.16 ):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def _A (self , lowerCAmelCase=0.5 ):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def _A (self ):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def _A (self , lowerCAmelCase=None , lowerCAmelCase=None ):
        return (self.nir - b) / (a * self.red)

    def _A (self ):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def _A (self ):
        return (self.red + self.green + self.blue) / 30.5

    def _A (self ):
        return self.nir / self.red

    def _A (self ):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def _A (self ):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def _A (self ):
        return self.green / (self.nir + self.red + self.green)

    def _A (self ):
        return self.nir / (self.nir + self.red + self.green)

    def _A (self ):
        return self.red / (self.nir + self.red + self.green)

    def _A (self ):
        return (self.green - self.red) / (self.green + self.red)

    def _A (self ):
        return (self.red - self.green) / (self.red + self.green)

    def _A (self ):
        __lowercase= np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        __lowercase= np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value

    def _A (self ):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def _A (self ):
        return self.nir / self.red

    def _A (self ):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def _A (self ):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
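The obfuscated methods above correspond to TheAlgorithms' IndexCalculation class; the core NDVI formula can be sanity-checked with plain numpy (a worked example, not part of the file):

import numpy as np

red = np.array([[0.1, 0.2], [0.3, 0.4]])
nir = np.array([[0.5, 0.6], [0.7, 0.8]])

# NDVI = (NIR - Red) / (NIR + Red), exactly as in the ndvi() method above
print((nir - red) / (nir + red))  # [[0.6667, 0.5], [0.4, 0.3333]] (approx.)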
304
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase = {
    '''configuration_xmod''': [
        '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XmodConfig''',
        '''XmodOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase = [
        '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XmodForCausalLM''',
        '''XmodForMaskedLM''',
        '''XmodForMultipleChoice''',
        '''XmodForQuestionAnswering''',
        '''XmodForSequenceClassification''',
        '''XmodForTokenClassification''',
        '''XmodModel''',
        '''XmodPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
304
1
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


lowerCAmelCase = get_logger(__name__)


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=0 ) -> List[Any]:
    '''simple docstring'''
    os.makedirs(lowercase__ , exist_ok=lowercase__ )
    with FSDP.state_dict_type(
        lowercase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        __lowercase= model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            __lowercase= F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
            __lowercase= os.path.join(lowercase__ , lowercase__ )
            if accelerator.process_index == 0:
                logger.info(F'Saving model to {output_model_file}' )
                torch.save(lowercase__ , lowercase__ )
                logger.info(F'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            __lowercase= (
                F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            __lowercase= os.path.join(lowercase__ , lowercase__ )
            logger.info(F'Saving model to {output_model_file}' )
            torch.save(lowercase__ , lowercase__ )
            logger.info(F'Model saved to {output_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            __lowercase= os.path.join(lowercase__ , F'{MODEL_NAME}_{model_index}' )
            os.makedirs(lowercase__ , exist_ok=lowercase__ )
            logger.info(F'Saving model to {ckpt_dir}' )
            __lowercase= {'model': state_dict}
            dist_cp.save_state_dict(
                state_dict=lowercase__ , storage_writer=dist_cp.FileSystemWriter(lowercase__ ) , planner=DefaultSavePlanner() , )
            logger.info(F'Model saved to {ckpt_dir}' )


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=0 ) -> List[Any]:
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        lowercase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(lowercase__ ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object' )
                return
            __lowercase= F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
            __lowercase= os.path.join(lowercase__ , lowercase__ )
            logger.info(F'Loading model from {input_model_file}' )
            __lowercase= torch.load(lowercase__ )
            logger.info(F'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            __lowercase= (
                F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            __lowercase= os.path.join(lowercase__ , lowercase__ )
            logger.info(F'Loading model from {input_model_file}' )
            __lowercase= torch.load(lowercase__ )
            logger.info(F'Model loaded from {input_model_file}' )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            __lowercase= (
                os.path.join(lowercase__ , F'{MODEL_NAME}_{model_index}' )
                if F'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(F'Loading model from {ckpt_dir}' )
            __lowercase= {'model': model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=lowercase__ , storage_reader=dist_cp.FileSystemReader(lowercase__ ) , planner=DefaultLoadPlanner() , )
            __lowercase= state_dict['model']
            logger.info(F'Model loaded from {ckpt_dir}' )
        model.load_state_dict(lowercase__ )


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=0 ) -> Dict:
    '''simple docstring'''
    os.makedirs(lowercase__ , exist_ok=lowercase__ )
    with FSDP.state_dict_type(
        lowercase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        __lowercase= FSDP.optim_state_dict(lowercase__ , lowercase__ )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                __lowercase= (
                    F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                __lowercase= os.path.join(lowercase__ , lowercase__ )
                logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
                torch.save(lowercase__ , lowercase__ )
                logger.info(F'Optimizer state saved in {output_optimizer_file}' )
        else:
            __lowercase= os.path.join(lowercase__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
            os.makedirs(lowercase__ , exist_ok=lowercase__ )
            logger.info(F'Saving Optimizer state to {ckpt_dir}' )
            dist_cp.save_state_dict(
                state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(lowercase__ ) , planner=DefaultSavePlanner() , )
            logger.info(F'Optimizer state saved in {ckpt_dir}' )


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=0 ) -> Dict:
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        lowercase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            __lowercase= None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            __lowercase= (
                F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            __lowercase= os.path.join(lowercase__ , lowercase__ )
            logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
            __lowercase= torch.load(lowercase__ )
            logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
        else:
            __lowercase= (
                os.path.join(lowercase__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
                if F'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(F'Loading Optimizer from {ckpt_dir}' )
            __lowercase= load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(lowercase__ ) , )
            __lowercase= optim_state['optimizer']
            logger.info(F'Optimizer loaded from {ckpt_dir}' )
        __lowercase= FSDP.optim_state_dict_to_load(lowercase__ , lowercase__ , lowercase__ )
        optimizer.load_state_dict(lowercase__ )
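In practice these helpers are driven through Accelerate's checkpoint entry points rather than called directly. A hedged sketch using the public accelerate API (`Accelerator`, `FullyShardedDataParallelPlugin`, `save_state`/`load_state`); it is illustrative only and needs a real distributed launch to actually exercise FSDP:

import torch
from accelerate import Accelerator, FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin()  # state_dict_type etc. are configurable
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpt")  # routes through the FSDP save helpers above
accelerator.load_state("ckpt")  # routes through the FSDP load helpers above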
304
import math
from datetime import datetime, timedelta


def _lowerCamelCase( lowercase__ ) -> datetime:
    '''simple docstring'''
    __lowercase= year % 1_9
    __lowercase= year % 4
    __lowercase= year % 7
    __lowercase= math.floor(year / 1_0_0 )
    __lowercase= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
    __lowercase= leap_day_inhibits / 4
    __lowercase= (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    __lowercase= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    __lowercase= (1_9 * metonic_cycle + secular_moon_shift) % 3_0

    # PHM -> Paschal Full Moon
    __lowercase= (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(lowercase__ , 4 , 1_8 )
    else:
        return datetime(lowercase__ , 3 , 2_2 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


if __name__ == "__main__":
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        lowerCAmelCase = '''will be''' if year > datetime.now().year else '''was'''
        print(F'Easter in {year} {tense} {gauss_easter(year)}')
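A worked check of the formula (hedged: the def above is obfuscated, but the `__main__` block already calls it by its upstream name `gauss_easter`, which the line below assumes). For 2023 the intermediates work out to metonic_cycle = 9, days_to_add = 15 and days_from_phm_to_sunday = 3, so the result is March 22 plus 18 days:

print(gauss_easter(2023))  # 2023-04-09 00:00:00 (April 9 was indeed Easter Sunday in 2023)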
304
1
import unittest

from transformers import DonutProcessor


lowerCAmelCase = '''naver-clova-ix/donut-base'''


class A ( unittest.TestCase ):
    def _A (self ):
        __lowercase= DonutProcessor.from_pretrained(lowerCAmelCase )

    def _A (self ):
        __lowercase= {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }

        __lowercase= (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )

        __lowercase= self.processor.tokenajson(lowerCAmelCase )

        self.assertDictEqual(lowerCAmelCase , lowerCAmelCase )
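The obfuscated `tokenajson` above corresponds to the public `DonutProcessor.token2json` method, which parses Donut's XML-like tag sequence back into nested Python dicts. A hedged usage sketch:

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
print(processor.token2json("<s_name>John Doe</s_name>"))  # {'name': 'John Doe'}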
304
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


lowerCAmelCase = logging.get_logger(__name__)

lowerCAmelCase = {
    '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class A ( A_ ):
    UpperCamelCase_ : Optional[int] ='''blenderbot-small'''
    UpperCamelCase_ : Optional[Any] =['''past_key_values''']
    UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
        __lowercase= vocab_size
        __lowercase= max_position_embeddings
        __lowercase= d_model
        __lowercase= encoder_ffn_dim
        __lowercase= encoder_layers
        __lowercase= encoder_attention_heads
        __lowercase= decoder_ffn_dim
        __lowercase= decoder_layers
        __lowercase= decoder_attention_heads
        __lowercase= dropout
        __lowercase= attention_dropout
        __lowercase= activation_dropout
        __lowercase= activation_function
        __lowercase= init_std
        __lowercase= encoder_layerdrop
        __lowercase= decoder_layerdrop
        __lowercase= use_cache
        __lowercase= encoder_layers
        __lowercase= scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )


class A ( A_ ):
    @property
    def _A (self ):
        if self.task in ["default", "seq2seq-lm"]:
            __lowercase= OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )

            if self.use_past:
                __lowercase= {0: 'batch'}
                __lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                __lowercase= {0: 'batch', 1: 'decoder_sequence'}
                __lowercase= {0: 'batch', 1: 'decoder_sequence'}

            if self.use_past:
                self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            __lowercase= OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                __lowercase, __lowercase= self.num_layers
                for i in range(lowerCAmelCase ):
                    __lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
                    __lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            __lowercase= OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )

        return common_inputs

    @property
    def _A (self ):
        if self.task in ["default", "seq2seq-lm"]:
            __lowercase= super().outputs
        else:
            __lowercase= super(lowerCAmelCase , self ).outputs
            if self.use_past:
                __lowercase, __lowercase= self.num_layers
                for i in range(lowerCAmelCase ):
                    __lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
                    __lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
        __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )

        # Generate decoder inputs
        __lowercase= seq_length if not self.use_past else 1
        __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
        __lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        __lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            __lowercase, __lowercase= common_inputs['input_ids'].shape
            __lowercase= common_inputs['decoder_input_ids'].shape[1]
            __lowercase, __lowercase= self.num_attention_heads
            __lowercase= (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            __lowercase= decoder_seq_length + 3
            __lowercase= (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            __lowercase= torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )

            __lowercase= []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            __lowercase, __lowercase= self.num_layers
            __lowercase= min(lowerCAmelCase , lowerCAmelCase )
            __lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
            __lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'

            for _ in range(lowerCAmelCase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(lowerCAmelCase ),
                        torch.zeros(lowerCAmelCase ),
                        torch.zeros(lowerCAmelCase ),
                        torch.zeros(lowerCAmelCase ),
                    ) )
            # TODO: test this.
            __lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(lowerCAmelCase , lowerCAmelCase ):
                common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
        return common_inputs

    def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
        __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            __lowercase, __lowercase= common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            __lowercase= seqlen + 2
            __lowercase, __lowercase= self.num_layers
            __lowercase, __lowercase= self.num_attention_heads
            __lowercase= (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            __lowercase= common_inputs['attention_mask'].dtype
            __lowercase= torch.cat(
                [common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
            __lowercase= [
                (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
            ]
        return common_inputs

    def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        __lowercase= compute_effective_axis_dimension(
            lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
        __lowercase= compute_effective_axis_dimension(
            lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )

        # Generate dummy inputs according to compute batch and sequence
        __lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        __lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
        return common_inputs

    def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
        if self.task in ["default", "seq2seq-lm"]:
            __lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )

        elif self.task == "causal-lm":
            __lowercase= self._generate_dummy_inputs_for_causal_lm(
                lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
        else:
            __lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )

        return common_inputs

    def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
        if self.task in ["default", "seq2seq-lm"]:
            __lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
        else:
            __lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
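This ONNX config is consumed by transformers' export tooling. A hedged sketch of driving it through the (legacy) `transformers.onnx` Python API; the names below are the public ones, but treat the snippet as illustrative rather than a supported recipe:

from pathlib import Path

from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration
from transformers.models.blenderbot_small import BlenderbotSmallOnnxConfig
from transformers.onnx import export

ckpt = "facebook/blenderbot_small-90M"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(ckpt)

onnx_config = BlenderbotSmallOnnxConfig(model.config, task="seq2seq-lm")
export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))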
304
1
from itertools import product

from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros


def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]:
    '''simple docstring'''
    __lowercase= k_size // 2
    __lowercase, __lowercase= mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    __lowercase= 1 / (2 * pi * sigma) * exp(-(square(lowercase__ ) + square(lowercase__ )) / (2 * square(lowercase__ )) )
    return g


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
    '''simple docstring'''
    __lowercase, __lowercase= image.shape[0], image.shape[1]
    # dst image height and width
    __lowercase= height - k_size + 1
    __lowercase= width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    __lowercase= zeros((dst_height * dst_width, k_size * k_size) )
    __lowercase= 0
    for i, j in product(range(lowercase__ ) , range(lowercase__ ) ):
        __lowercase= ravel(image[i : i + k_size, j : j + k_size] )
        __lowercase= window
        row += 1

    # turn the kernel into shape(k*k, 1)
    __lowercase= gen_gaussian_kernel(lowercase__ , lowercase__ )
    __lowercase= ravel(lowercase__ )

    # reshape and get the dst image
    __lowercase= dot(lowercase__ , lowercase__ ).reshape(lowercase__ , lowercase__ ).astype(lowercase__ )

    return dst


if __name__ == "__main__":
    # read original image
    lowerCAmelCase = imread(R'''../image_data/lena.jpg''')
    # turn image in gray scale value
    lowerCAmelCase = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    lowerCAmelCase = gaussian_filter(gray, 3, sigma=1)
    lowerCAmelCase = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
    imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
    waitKey()
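One property worth noting (a hedged observation from the formula, not a change to the code): the kernel generator samples the Gaussian but never re-normalises the weights to sum to 1, so the filtered image comes out uniformly darker than the input. A quick check for k_size=3, sigma=1, mirroring the formula above:

from numpy import exp, mgrid, pi, square

center = 3 // 2
y, x = mgrid[0 - center : 3 - center, 0 - center : 3 - center]
g = 1 / (2 * pi * 1) * exp(-(square(x) + square(y)) / (2 * square(1)))
print(g.sum())  # about 0.78, not 1.0: output brightness is scaled by this factor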
304
from math import factorial, radians


def _lowerCamelCase( lowercase__ , lowercase__ = 1_8 , lowercase__ = 1_0 ) -> float:
    '''simple docstring'''
    __lowercase= angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    __lowercase= radians(lowercase__ )

    __lowercase= angle_in_radians
    __lowercase= 3
    __lowercase= -1

    for _ in range(lowercase__ ):
        result += (b * (angle_in_radians**a)) / factorial(lowercase__ )

        __lowercase= -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(lowercase__ , lowercase__ )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
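A quick sanity check (hedged: the def above is obfuscated; upstream the function is named `maclaurin_sin`, which these lines assume). With the default 18 terms and 10-digit rounding the series reproduces familiar values:

print(maclaurin_sin(30))  # 0.5, matches sin(30 degrees)
print(maclaurin_sin(90))  # 1.0, matches sin(90 degrees)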
304
1