code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image SCREAMING_SNAKE_CASE__ : int = ["""text""", """image""", """audio"""] def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :Dict = [] for input_type in input_types: if input_type == "text": inputs.append('''Text input''' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((5_1_2, 5_1_2) ) ) elif input_type == "audio": inputs.append(torch.ones(3_0_0_0 ) ) elif isinstance(snake_case, snake_case ): inputs.append(create_inputs(snake_case ) ) else: raise ValueError(f'''Invalid type requested: {input_type}''' ) return inputs def __lowercase ( snake_case ): """simple docstring""" __magic_name__ :int = [] for output in outputs: if isinstance(snake_case, (str, AgentText) ): output_types.append('''text''' ) elif isinstance(snake_case, (Image.Image, AgentImage) ): output_types.append('''image''' ) elif isinstance(snake_case, (torch.Tensor, AgentAudio) ): output_types.append('''audio''' ) else: raise ValueError(f'''Invalid output: {output}''' ) return output_types @is_tool_test class lowerCamelCase_ : def A ( self ): """simple docstring""" self.assertTrue(hasattr(self.tool , '''inputs''' ) ) self.assertTrue(hasattr(self.tool , '''outputs''' ) ) __magic_name__ :Union[str, Any] = self.tool.inputs for _input in inputs: if isinstance(_input , __lowerCAmelCase ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) __magic_name__ :Optional[Any] = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def A ( self ): 
"""simple docstring""" __magic_name__ :List[Any] = create_inputs(self.tool.inputs ) __magic_name__ :Union[str, Any] = self.tool(*__lowerCAmelCase ) # There is a single output if len(self.tool.outputs ) == 1: __magic_name__ :Union[str, Any] = [outputs] self.assertListEqual(output_types(__lowerCAmelCase ) , self.tool.outputs ) def A ( self ): """simple docstring""" self.assertTrue(hasattr(self.tool , '''description''' ) ) self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) ) self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) ) def A ( self ): """simple docstring""" __magic_name__ :str = create_inputs(self.tool.inputs ) __magic_name__ :Optional[Any] = self.tool(*__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): __magic_name__ :str = [outputs] self.assertEqual(len(__lowerCAmelCase ) , len(self.tool.outputs ) ) for output, output_type in zip(__lowerCAmelCase , self.tool.outputs ): __magic_name__ :Tuple = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = create_inputs(self.tool.inputs ) __magic_name__ :Tuple = [] for _input, input_type in zip(__lowerCAmelCase , self.tool.inputs ): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error __magic_name__ :Optional[int] = self.tool(*__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): __magic_name__ :Any = [outputs] self.assertEqual(len(__lowerCAmelCase ) , len(self.tool.outputs ) )
0
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict: lowercase : List[str] =R'''\w+[.]\d+''' lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ ) for pat in pats: lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) ) return key def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str: lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase : str =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowercase : Optional[Any] =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase : 
Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]: # Step 1: Convert pytorch tensor to numpy lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) ) lowercase : Dict =flatten_dict(__magic_name__ ) lowercase : Dict ={} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase : Dict =rename_key(__magic_name__ ) lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowercase : Tuple =jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ )
92
0
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class __lowerCamelCase (_a ): _lowercase = (DPMSolverSDEScheduler,) _lowercase = 10 def snake_case_ ( self: Tuple,**A_: List[str] ): '''simple docstring''' __UpperCamelCase = { 'num_train_timesteps': 1100, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', 'noise_sampler_seed': 0, } config.update(**A_ ) return config def snake_case_ ( self: List[Any] ): '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def snake_case_ ( self: str ): '''simple docstring''' for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1],[0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=A_,beta_end=A_ ) def snake_case_ ( self: Dict ): '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=A_ ) def snake_case_ ( self: List[str] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps ) __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __UpperCamelCase = sample.to(A_ ) for i, t in enumerate(scheduler.timesteps ): __UpperCamelCase = scheduler.scale_model_input(A_,A_ ) __UpperCamelCase = model(A_,A_ ) __UpperCamelCase = scheduler.step(A_,A_,A_ ) __UpperCamelCase = output.prev_sample __UpperCamelCase = torch.sum(torch.abs(A_ ) ) __UpperCamelCase = torch.mean(torch.abs(A_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 
1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3 else: assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3 def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config(prediction_type='v_prediction' ) __UpperCamelCase = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps ) __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __UpperCamelCase = sample.to(A_ ) for i, t in enumerate(scheduler.timesteps ): __UpperCamelCase = scheduler.scale_model_input(A_,A_ ) __UpperCamelCase = model(A_,A_ ) __UpperCamelCase = scheduler.step(A_,A_,A_ ) __UpperCamelCase = output.prev_sample __UpperCamelCase = torch.sum(torch.abs(A_ ) ) __UpperCamelCase = torch.mean(torch.abs(A_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3 else: assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3 def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps,device=A_ ) __UpperCamelCase = self.dummy_model() __UpperCamelCase = 
self.dummy_sample_deter.to(A_ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: __UpperCamelCase = scheduler.scale_model_input(A_,A_ ) __UpperCamelCase = model(A_,A_ ) __UpperCamelCase = scheduler.step(A_,A_,A_ ) __UpperCamelCase = output.prev_sample __UpperCamelCase = torch.sum(torch.abs(A_ ) ) __UpperCamelCase = torch.mean(torch.abs(A_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3 else: assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3 def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**A_,use_karras_sigmas=A_ ) scheduler.set_timesteps(self.num_inference_steps,device=A_ ) __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter.to(A_ ) * scheduler.init_noise_sigma __UpperCamelCase = sample.to(A_ ) for t in scheduler.timesteps: __UpperCamelCase = scheduler.scale_model_input(A_,A_ ) __UpperCamelCase = model(A_,A_ ) __UpperCamelCase = scheduler.step(A_,A_,A_ ) __UpperCamelCase = output.prev_sample __UpperCamelCase = torch.sum(torch.abs(A_ ) ) __UpperCamelCase = torch.mean(torch.abs(A_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2 else: assert abs(result_sum.item() - 
1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
1
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. 
lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. 
if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def SCREAMING_SNAKE_CASE_ ( _snake_case :int = 3 ) -> qiskit.result.counts.Counts: if isinstance(_snake_case , _snake_case ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) _A = QuantumRegister(_snake_case , '''qr''' ) _A = ClassicalRegister(_snake_case , '''cr''' ) _A = QuantumCircuit(_snake_case , _snake_case ) _A = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots _A = Aer.get_backend('''qasm_simulator''' ) _A = execute(_snake_case , _snake_case , shots=10_000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( f'Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}' )
2
'''simple docstring''' import datasets UpperCamelCase_ = """\ @InProceedings{conneau2018xnli, author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\", title = \"XNLI: Evaluating Cross-lingual Sentence Representations\", booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\", year = \"2018\", publisher = \"Association for Computational Linguistics\", location = \"Brussels, Belgium\", } """ UpperCamelCase_ = """\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ UpperCamelCase_ = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. 
Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric(\"xnli\") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def lowerCamelCase_ ( self : str ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
92
0
'''simple docstring''' def A_( A : str): UpperCamelCase = [0] * len(A) for i in range(1 , len(A)): # use last results for better performance - dynamic programming UpperCamelCase = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: UpperCamelCase = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 UpperCamelCase = j return prefix_result def A_( A : str): return max(prefix_function(A)) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , 
n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() 
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : 
int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] 
=model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
# NOTE(review): whitespace-flattened, identifier-mangled copy of the Hugging Face
# ErnieM tokenizer (transformers models/ernie_m/tokenization_ernie_m.py). The
# obfuscation rewrote every assignment target to a placeholder (``lowerCAmelCase = ...``)
# while references kept original names (``self.vocab``, ``self.sp_model`` ...), so the
# row is not runnable as-is. Kept byte-identical; documented rather than rewritten.
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Tuple = logging.get_logger(__name__) __UpperCamelCase : Optional[int] = '''▁''' __UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} __UpperCamelCase : Tuple = { '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } __UpperCamelCase : Tuple = { '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } __UpperCamelCase : Dict = { '''ernie-m-base''': 514, '''ernie-m-large''': 514, } __UpperCamelCase : Dict = { '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = ["input_ids"] snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = RESOURCE_FILES_NAMES def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , 
pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) lowerCAmelCase = do_lower_case lowerCAmelCase = sentencepiece_model_ckpt lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCAmelCase = self.load_vocab(filepath=_snake_case ) else: lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )} lowerCAmelCase = {v: k for k, v in self.vocab.items()} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if text is None: return None lowerCAmelCase = self.tokenize(_snake_case ) lowerCAmelCase ,lowerCAmelCase = '', [] for i, ch in enumerate(_snake_case ): if ch in self.SP_CHAR_MAPPING: lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case ) else: lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case ) if self.is_whitespace(_snake_case ): continue normalized_text += ch char_mapping.extend([i] * len(_snake_case ) ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0 if self.do_lower_case: lowerCAmelCase = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCAmelCase = token[1:] lowerCAmelCase = text[offset:].index(_snake_case ) + offset lowerCAmelCase = start + len(_snake_case ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) lowerCAmelCase = end return token_mapping @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): """simple docstring""" lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , _snake_case ): """simple docstring""" lowerCAmelCase = d # 
for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) ) def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ): """simple docstring""" if self.sp_model_kwargs.get('enable_sampling' ) is True: lowerCAmelCase = True if self.sp_model_kwargs.get('alpha' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case ) else: lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = [] for pi, piece in enumerate(_snake_case ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_snake_case ) and pi != 0: new_pieces.append(_snake_case ) continue else: continue lowerCAmelCase = 0 for i, chunk in enumerate(_snake_case ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_snake_case ) lowerCAmelCase = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i if len(_snake_case ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = 
''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.convert_ids_to_tokens(_snake_case ) lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.reverse_vocab.get(_snake_case , self.unk_token ) def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(_snake_case ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(_snake_case ) == 1: lowerCAmelCase = unicodedata.category(_snake_case ) if cat == "Zs": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = {} with io.open(_snake_case , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(_snake_case ): lowerCAmelCase = line.rstrip('\n' ) lowerCAmelCase = int(_snake_case ) return token_to_idx def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = 0 if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(_snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( 
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) lowerCAmelCase = token_index writer.write(token + '\n' ) index += 1 lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' ) with open(_snake_case , 'wb' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (vocab_file,)
4
# NOTE(review): whitespace-flattened, identifier-mangled copy of the Hugging Face
# ALBERT model test file (tests/models/albert/test_modeling_albert.py). Assignment
# targets were rewritten to a placeholder (``lowercase : str =parent`` instead of
# ``self.parent = parent``), so the attributes read later are never set and the
# row is not runnable as-is. Kept byte-identical; documented rather than rewritten.
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ): '''simple docstring''' lowercase : str =parent lowercase : int =batch_size lowercase : Any =seq_length lowercase : int =is_training lowercase : str =use_input_mask lowercase : int =use_token_type_ids lowercase : Dict =use_labels lowercase : int =vocab_size lowercase : str =embedding_size lowercase : Union[str, Any] =hidden_size lowercase : Tuple 
=num_hidden_layers lowercase : Any =num_hidden_groups lowercase : Union[str, Any] =num_attention_heads lowercase : Any =intermediate_size lowercase : Tuple =hidden_act lowercase : Optional[int] =hidden_dropout_prob lowercase : Union[str, Any] =attention_probs_dropout_prob lowercase : List[Any] =max_position_embeddings lowercase : int =type_vocab_size lowercase : int =type_sequence_label_size lowercase : Any =initializer_range lowercase : List[Any] =num_labels lowercase : int =num_choices lowercase : Optional[int] =scope def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[int] =None if self.use_input_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Dict =None if self.use_token_type_ids: lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Tuple =None lowercase : Any =None lowercase : Dict =None if self.use_labels: lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Any =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , 
num_hidden_groups=self.num_hidden_groups , ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =AlbertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : int =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , 
attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] =self.num_labels lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : List[Any] =self.num_labels lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() 
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[int] =self.num_choices lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Union[str, Any] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Dict =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': AlbertModel, 'fill-mask': 
AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ = True def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ): '''simple docstring''' lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): lowercase : Any =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) lowercase : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =AlbertModelTester(self ) lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def 
lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Tuple =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' ) lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] lowercase : int =torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : Union[str, Any] =torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""Shortest path in a graph whose edge weights are all 0 or 1 (0-1 BFS).

NOTE(review): reconstructed from an identifier-mangled row in which both
classes shared one placeholder name, the dataclass lost its two field names
(``destination_vertex``/``weight`` were referenced but never declared), and
every assignment target was collapsed to one placeholder — the module raised
NameError as written.
"""
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A directed edge of a 0/1-weighted graph."""

    # index of the vertex this edge points to
    destination_vertex: int
    # edge weight; restricted to 0 or 1 by AdjacencyList.add_edge
    weight: int


class AdjacencyList:
    """Adjacency-list representation of a directed graph with 0/1 edge weights."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the edges leaving ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        """Number of vertices in the graph."""
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge from ``from_vertex`` to ``to_vertex``.

        Raises:
            ValueError: if ``weight`` is not 0 or 1, or ``to_vertex`` is out of range.
        """
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """Return the minimum total weight of a path between the two vertices.

        0-1 BFS: a weight-0 edge pushes its neighbour to the *front* of the
        deque, a weight-1 edge to the back, so vertices are popped in
        non-decreasing distance order (the deque acts as a two-level
        priority queue, giving O(V + E) instead of Dijkstra's O(E log V)).

        Raises:
            ValueError: if ``finish_vertex`` is unreachable from ``start_vertex``.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                # entry invalidated by a shorter path found after it was queued
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
5
"""Nearest-neighbour image scaling.

NOTE(review): reconstructed from an identifier-mangled row in which every
``self.<attr> = ...`` assignment target was rewritten to a bare local
placeholder, so the attributes read later (``self.img``, ``self.ratio_x``,
``self.output`` ...) were never set.  The OpenCV import is deferred to the
demo guard so the class itself only requires numpy.
"""
import numpy as np


class NearestNeighbour:
    """Resize an image to ``dst_width`` x ``dst_height`` by nearest-neighbour
    sampling.

    Call :meth:`process` and read the resized image from ``self.output``.
    The source image is assumed to be an H x W x 3 array — TODO confirm
    against callers (the demo below loads a 3-channel BGR image).
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # how many source pixels one destination pixel spans, per axis
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # destination buffer, pre-filled with white (255) pixels
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        """Fill ``self.output`` pixel by pixel from the nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map destination column ``x`` to the nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map destination row ``y`` to the nearest source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    # OpenCV is only needed for this interactive demo, so import it lazily
    # (the mangled original imported from "cva", an obfuscation of cv2).
    from cv2 import destroyAllWindows, imread, imshow, waitKey

    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)

    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}",
        n.output,
    )
    waitKey(0)
    destroyAllWindows()
92
0
"""Deprecated feature-extractor alias for the DPT image processor."""
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


_lowerCamelCase = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    """Backward-compatibility shim: behaves exactly like ``DPTImageProcessor``.

    The obfuscated original declared ``__init__(self, *__A, **__A)`` — a
    duplicate parameter name, which is a SyntaxError — and dropped the
    warning category; both are restored here.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning so downstream users migrate before v5.
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
6
"""Equivalent resistance of resistor networks (series and parallel)."""
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors in parallel.

    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    Raises:
        ValueError: if any resistor is negative or zero (division by zero).

    The obfuscated original built the error message but then raised
    ``ValueError(resistors)`` — the input list — instead of the message;
    fixed to raise the message.
    """
    first_sum = 0.00
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors in series.

    Req = R1 + R2 + ... + Rn

    Raises:
        ValueError: if any resistor is negative.
    """
    sum_r = 0.00
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        sum_r += resistor
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
92
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a = { '''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GraphormerForGraphClassification''', '''GraphormerModel''', '''GraphormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
7
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": 
"""encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def 
_lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized 
from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple 
=WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
92
0
'''simple docstring''' import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowercase__ : Optional[int] = data_utils.TransfoXLTokenizer lowercase__ : str = data_utils.TransfoXLCorpus lowercase__ : Union[str, Any] = data_utils lowercase__ : int = data_utils def _lowerCAmelCase ( __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any] ) -> Optional[Any]: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__snake_case , 'rb' ) as fp: __A : Union[str, Any] = pickle.load(__snake_case , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) __A : Any = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(f'Save vocabulary to {pytorch_vocab_dump_path}' ) __A : Optional[Any] = corpus.vocab.__dict__ torch.save(__snake_case , __snake_case ) __A : List[str] = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , __snake_case ) __A : Dict = pytorch_dump_folder_path + '/' + CORPUS_NAME print(f'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(__snake_case , __snake_case ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model __A : Any = os.path.abspath(__snake_case ) __A : List[Any] = os.path.abspath(__snake_case ) print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' 
) # Initialise PyTorch model if transfo_xl_config_file == "": __A : List[str] = TransfoXLConfig() else: __A : str = TransfoXLConfig.from_json_file(__snake_case ) print(f'Building PyTorch model from configuration: {config}' ) __A : Any = TransfoXLLMHeadModel(__snake_case ) __A : Optional[Any] = load_tf_weights_in_transfo_xl(__snake_case , __snake_case , __snake_case ) # Save pytorch-model __A : Dict = os.path.join(__snake_case , __snake_case ) __A : Optional[int] = os.path.join(__snake_case , __snake_case ) print(f'Save PyTorch model to {os.path.abspath(__snake_case )}' ) torch.save(model.state_dict() , __snake_case ) print(f'Save configuration file to {os.path.abspath(__snake_case )}' ) with open(__snake_case , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowercase__ : List[Any] = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) lowercase__ : int = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
8
"""Flax time-step embedding layers (sinusoidal positional encodings)."""
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal embeddings of shape ``(len(timesteps), embedding_dim)``.

    Half the channels carry sines, half cosines, over geometrically spaced
    frequencies between ``min_timescale`` and ``max_timescale``.
    ``flip_sin_to_cos`` swaps the (cos, sin) / (sin, cos) ordering.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    # NOTE: the obfuscated source passed the wrong variable to jnp.arange and
    # used the nonexistent dtype ``jnp.floataa``; restored to float32.
    inv_timescales = min_timescale * jnp.exp(
        jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment
    )
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Two-layer MLP (Dense -> SiLU -> Dense) applied to a time embedding."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps :func:`get_sinusoidal_embeddings` as a flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
92
0
"""Convert a fairseq M2M100 checkpoint to the HuggingFace format."""
import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # default=None so missing keys are ignored rather than raising.
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight data."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq checkpoint from disk and return the converted HF model."""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1_024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    # The shared embedding is tied to the decoder embedding in fairseq.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: the fairseq dict lacks HF-only buffers (e.g. positions).
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # Fix: the original called convert_fairseq_mamaaa_checkpoint_from_disk
    # with ``args.fairseq_pathß`` (stray ß), an AttributeError at runtime.
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
9
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO Update this UpperCamelCase_ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'esm' def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =vocab_size lowercase : List[Any] =hidden_size lowercase : Any =num_hidden_layers lowercase : Optional[Any] =num_attention_heads lowercase : Tuple =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Optional[int] =max_position_embeddings lowercase : Union[str, Any] =initializer_range lowercase : Tuple =layer_norm_eps lowercase : Union[str, Any] =position_embedding_type lowercase : List[Any] =use_cache lowercase : Dict =emb_layer_norm_before lowercase : Optional[Any] =token_dropout lowercase : Union[str, Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No 
esmfold_config supplied for folding model, using default values.''' ) lowercase : Any =EsmFoldConfig() elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ ) lowercase : Union[str, Any] =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowercase : int =get_default_vocab_list() else: lowercase : Tuple =vocab_list else: lowercase : Union[str, Any] =None lowercase : Dict =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase__ ): lowercase : Optional[Any] =self.esmfold_config.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = None lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 0 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.trunk is None: lowercase : str =TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase__ ): lowercase : int =TrunkConfig(**self.trunk ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =asdict(self ) lowercase : Union[str, Any] =self.trunk.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 48 lowerCamelCase_ = 10_24 lowerCamelCase_ = 1_28 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = False lowerCamelCase_ = 4 lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' if 
self.structure_module is None: lowercase : Any =StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase__ ): lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase : str =self.sequence_state_dim // self.sequence_head_width lowercase : int =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =asdict(self ) lowercase : Any =self.structure_module.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 3_84 lowerCamelCase_ = 1_28 lowerCamelCase_ = 16 lowerCamelCase_ = 
1_28 lowerCamelCase_ = 12 lowerCamelCase_ = 4 lowerCamelCase_ = 8 lowerCamelCase_ = 0.1 lowerCamelCase_ = 8 lowerCamelCase_ = 1 lowerCamelCase_ = 2 lowerCamelCase_ = 7 lowerCamelCase_ = 10 lowerCamelCase_ = 1E-8 lowerCamelCase_ = 1E5 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return asdict(self ) def _lowerCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
92
0
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """Sort *array* of integers in place with pigeonhole sort and return it.

    Builds one "hole" per value in [min, max], counts occurrences, then writes
    the values back in order. O(n + range) time, O(range) extra space.

    Args:
        array: list of integers (may be empty; negatives allowed).

    Returns:
        The same list object, sorted ascending.
    """
    if not array:  # empty input: nothing to do (min()/max() would raise)
        return array

    lo, hi = min(array), max(array)
    holes_range = hi - lo + 1
    holes = [0] * holes_range          # representative value per hole
    holes_repeat = [0] * holes_range   # occurrence count per hole

    for value in array:
        index = value - lo
        holes[index] = value
        holes_repeat[index] += 1

    # Write the counted values back into the original list, in order.
    write_at = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[write_at] = holes[i]
            write_at += 1
            holes_repeat[i] -= 1
    return array


# Backward-compatible alias: the previous (obfuscated) public name of this function.
_snake_case = pigeon_sort

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # FIX: the original read input into one name but split an undefined
    # `user_input` and called an undefined `pigeon_sort` -> NameError.
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
10
"""Repository-wide pytest configuration (conftest.py) for the transformers repo."""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# FIX: the path was assigned to a throwaway name while an undefined
# `git_repo_path` was inserted into sys.path -> NameError on import.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


# FIX: all four hooks below were named `_lowerCAmelCase`, so they shadowed each
# other and pytest (which discovers hooks strictly by name) never called any of
# them; the sessionfinish hook even had two parameters with the same name, which
# is a SyntaxError. Canonical hook names restored.
def pytest_configure(config):
    """Register the custom markers used across the test suite."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Forward option registration to the shared transformers helper."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Optionally emit the detailed report files requested via --make-reports."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        # `make_reports` holds the report id to use for the generated files.
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """OutputChecker that treats IGNORE_RESULT-flagged examples as always passing."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# FIX: these three patches were assigned to a throwaway module-level name, so the
# custom doctest machinery was never actually installed.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
92
0
"""Tests for datasets' Apache Beam based dataset builders."""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


# FIX: every class in this file was named `__A` (each definition shadowing the
# previous one) while the tests instantiated `DummyBeamDataset` /
# `NestedBeamDataset`, and locals were bound to `_a` but read under their real
# names -> NameError on every test. Consistent names restored throughout.
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam builder producing flat string examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam builder producing nested (Sequence) examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    """Return (key, example) pairs for the flat dummy dataset."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    """Return (key, example) pairs for the nested dummy dataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards to exercise the sharded code path.
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # FIX: the second existence check previously re-tested shard 00000;
            # it must cover the other shard, 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner, preparing the dataset must fail explicitly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
11
"""Image processor: rescale + symmetric pad to a multiple of `pad_size` (Swin2SR-style)."""
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# FIX: the class previously inherited from an undefined name (`lowercase__`),
# its three methods all shared the name `lowerCamelCase_` (shadowing each
# other), and method locals were bound to `lowercase` but read under their real
# names (`old_height`, `pad_height`, `do_rescale`, `images`, ...) -> NameError.
class Swin2SRImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that optionally rescales pixel values and pads
    images so height and width are multiples of `pad_size`.

    Args:
        do_rescale (`bool`, defaults to `True`): whether to rescale by `rescale_factor`.
        rescale_factor (`int` or `float`, defaults to `1/255`): scale applied when rescaling.
        do_pad (`bool`, defaults to `True`): whether to pad to a multiple of `pad_size`.
        pad_size (`int`, defaults to 8): target multiple for height/width after padding.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale `image` by `scale` (delegates to the module-level helper)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        """Symmetrically pad `image` on the bottom/right up to the next multiple of `size`."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch: to-numpy, optional rescale, optional pad, channel format."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)


# Backward-compatible alias for the previous (obfuscated) class name.
__SCREAMING_SNAKE_CASE = Swin2SRImageProcessor
92
0
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy package init for the MGP-STR model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# FIX: the import structure dict and the torch-only symbol list were assigned to
# throwaway names while `_LazyModule` received an undefined `_import_structure`
# -> NameError on import, and the modeling symbols were never registered.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real submodule import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
12
"""Lazy package init for the MBart model (PyTorch / TensorFlow / Flax / tokenizers)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# FIX: the import-structure dict and every per-backend symbol list were assigned
# to throwaway names (`UpperCamelCase_`), so nothing was ever registered and the
# final `_LazyModule(...)` call referenced an undefined `_import_structure`.
# The canonical lazy-init pattern is restored below.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real submodule import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
"""Dummy placeholder objects raised-for when `sentencepiece` is not installed."""
from ..utils import DummyObject, requires_backends


# NOTE(review): the obfuscation pass collapsed every dummy class in this file to
# the single name `UpperCAmelCase_`, so after module execution only the final
# binding survives; the original per-model class names are not recoverable from
# this source and must be restored from upstream. Two concrete defects are
# fixed here: the metaclass was the undefined `_UpperCAmelCase` (the imported
# `DummyObject` was never used), and the backend list was stored under a
# throwaway attribute name instead of `_backends`, which the DummyObject
# machinery reads.
class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative ImportError on use when the
    `sentencepiece` backend is missing."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
13
"""Lazy package init for the ByT5 tokenizer."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# FIX: the dict was assigned to a throwaway name while `_LazyModule` received an
# undefined `_import_structure` -> NameError on import. Also made the
# TYPE_CHECKING import consistent with the declared structure (it imported
# `ByTaTokenizer` from `tokenization_byta`, which does not match
# `tokenization_byt5` / `ByT5Tokenizer`).
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Install the lazy module so attribute access triggers the real submodule import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
"""Tests for TvltProcessor: save/load round-trip and delegation to its sub-processors.

Bug fix: every method was named ``__lowercase``, so each definition shadowed the
previous one — unittest discovered no tests and setUp/tearDown were lost.
Method names are restored from the bodies (names inferred — TODO confirm
against the upstream test file).
"""
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch

if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        # Checkpoint providing both image-processor and feature-extractor configs.
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """A saved processor reloads with the same sub-processor types."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        """processor(audio=...) matches calling the feature extractor directly."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        """processor(images=...) matches calling the image processor directly."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        """Audio + image inputs produce the combined key set; no input raises."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        """The processor's input names are the concatenation of its sub-processors'."""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
14
"""Fine-tune and evaluate a multiple-choice model with the HF Trainer.

Bug fixes: both argument dataclasses were named `__SCREAMING_SNAKE_CASE`
(the second shadowed the first) while `main` referenced `ModelArguments` /
`DataTrainingArguments`; all functions were named `_lowerCAmelCase` while
`__main__` called `main`; `compute_metrics` declared one parameter name but
used another; `results.update(...)` received an undefined name; and
`training_args.fpaa` is not a TrainingArguments attribute (should be `fp16`).
"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pointing at the model/config/tokenizer to fine-tune."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments describing the task and the data it is trained/evaluated on."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main() -> Dict:
    """Parse CLI args, train and/or evaluate, and return the eval metrics dict."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer.
    # Distributed training: the .from_pretrained methods guarantee that only one
    # local process can concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # argmax over the per-choice logits -> predicted choice index
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: pad to multiples of 8 for fp16 tensor-core efficiency.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
92
0
"""Dutch national flag sort: order a sequence containing only 0, 1 and 2 in one pass.

Bug fixes: the three color constants and the `colors` tuple were all assigned
to the same name `A`, so `(red, white, blue)` raised NameError; the function
was defined as `UpperCamelCase` while `__main__` called
`dutch_national_flag_sort`.
"""

red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort *sequence* so all 0s precede all 1s precede all 2s.

    Single pass, O(n) time, O(1) extra space (Dijkstra's three-way partition).
    NOTE: for len(sequence) > 1 the list is sorted in place and returned;
    for len == 1 a copy is returned (preserved from the original behavior).

    Raises:
        ValueError: if an element is not one of the values in ``colors``.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0 belongs in the low region; grow it and advance.
            sequence[mid], sequence[low] = sequence[low], sequence[mid]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2 belongs in the high region; the swapped-in value is unexamined,
            # so mid does not advance.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


# Backward-compatible alias for the previous (obfuscated) public name.
UpperCamelCase = dutch_national_flag_sort

if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
15
"""Build a RAG knowledge dataset: split documents, embed passages with DPR, index with Faiss HNSW.

Bug fixes: `split_text` declared three parameters all obfuscated to the same
name while its body used `text`/`n`/`character`; the dataclass field defaults
referenced an undefined name where `__file__` was intended; all dataclass
fields shared one name, shadowing each other.
"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser

logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split *text* into chunks of at most *n* words (words delimited by *character*)."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split each document into 100-word passages, repeating the title per passage."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages (title + text pairs)."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation:
    # https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
92
0
"""GPT-NeoX model configuration.

Bug fixes: the class inherited from an undefined `__snake_case` instead of
`PretrainedConfig`; the logger and the archive map were both assigned to the
same module-level name (the second shadowed the first); all `__init__`
parameters shared one obfuscated name; `_rope_scaling_validation` passed an
undefined name as the `.get()` default; the validation error message named
fields `name`/`factor` while the code reads `type`/`factor` (plus "with with"
and "divisble" typos).
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    """Configuration class storing the architecture hyper-parameters of a GPT-NeoX model.

    Defaults reproduce the EleutherAI/gpt-neox-20b architecture.
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,  # fraction of head dims that get rotary embeddings
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: a dict {"type": "linear"|"dynamic", "factor": float > 1}, or None."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")


# Backward-compatible alias for the previous (obfuscated) class name.
_SCREAMING_SNAKE_CASE = GPTNeoXConfig
16
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase_ = 128022 UpperCamelCase_ = 128028 @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = MaMaaaTokenizer lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = True def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().setUp() lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : List[Any] =Path(self.tmpdirname ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ): '''simple docstring''' return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self : Any ): 
'''simple docstring''' lowercase : Tuple ='''</s>''' lowercase : Union[str, Any] =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.get_tokenizer() lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_tokenizer() lowercase : str =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , ) lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , '''This is a test''' ) @slow def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' # fmt: off lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 
120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCamelCase_ = 'facebook/m2m100_418M' lowerCamelCase_ = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCamelCase_ = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def lowerCamelCase_ ( cls : Optional[Any] ): '''simple docstring''' lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' ) lowercase : Optional[int] =1 return cls def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =self.tokenizer.get_vocab() self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : List[Any] ='''en''' lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text 
).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =tempfile.mkdtemp() lowercase : Tuple =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(UpperCAmelCase__ ) lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[str] ='''en''' lowercase : int ='''fr''' lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : str =shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowercase : int =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''mr''' 
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowercase : Union[str, Any] ='''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int ='''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowercase : Optional[Any] ='''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
92
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure: maps submodule name -> list of public names it exports.
# This dict MUST be named `_import_structure` — it is passed to `_LazyModule` below
# (the previous revision bound it to a throwaway name, leaving `_import_structure`
# undefined at the `_LazyModule(...)` call).
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

# The slow tokenizer is only exported when `sentencepiece` is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

# The fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    # Real imports, visible only to static type checkers.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
17
'''simple docstring'''


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    The prime factors of 13195 are 5, 7, 13 and 29; the largest prime factor
    of 600851475143 is 6857.

    :param n: number to factorise (int, or anything castable to int)
    :raises TypeError: if ``n`` cannot be cast to ``int``
    :raises ValueError: if ``n`` is smaller than one
    """
    # NOTE(fix): the function was previously named `_lowerCAmelCase` while the
    # __main__ guard below calls `solution()` — running the script raised NameError.
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n (n's smallest remaining prime factor)
        while n % i != 0:
            i += 1
        ans = i
        # strip every power of this factor out of n
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f'''{solution() = }''')
92
0
'''simple docstring'''
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch ``function`` for (multi-)GPU/TPU training from a notebook.

    Args:
        function: the training function to run. Called as ``function(*args)``.
        args: tuple of positional arguments forwarded to ``function``.
        num_processes: number of workers; required for a multi-GPU launch,
            defaults to 8 on TPU.
        mixed_precision: one of the values accepted by ``PrecisionType`` (e.g. "no", "fp16").
        use_port: TCP port used as ``master_port`` for the multi-GPU rendezvous.

    Raises:
        ValueError: on an unknown ``mixed_precision`` value, a missing
            ``num_processes`` for a GPU launch, or an ``Accelerator``/CUDA state
            initialized before launch.
        RuntimeError: when CUDA was initialized before forking subprocesses.
    """
    # Detect the hosting notebook environment.
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        # NOTE(fix): the message previously dereferenced `args.mixed_precision`,
        # but `args` is a tuple — that raised AttributeError instead of the
        # intended ValueError. Use the actual `mixed_precision` argument.
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.'''
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(F'''Launching a training on {num_processes} TPU cores.''')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(F'''Launching training on {num_processes} GPUs.''')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # Allow CPU fallback for ops not yet supported on MPS.
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launch ``function`` on ``num_processes`` CPU workers for debugging distributed code.

    Forces CPU execution and routes the rendezvous through a temporary file so the
    run is hermetic; intended for tests, not real training.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
18
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/s2t-wav2vec2-large-en-de""": (
        """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    """Configuration class for a Speech2Text2 decoder.

    Stores decoder hyper-parameters (vocabulary size, layer/attention counts,
    dropouts, token ids, ...). Instantiating with no arguments yields the
    defaults below.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map common config attribute names onto this model's decoder-specific ones.
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        # NOTE(fix): the previous revision named every parameter `UpperCAmelCase__`
        # (a SyntaxError) and bound the values to throwaway locals instead of
        # attributes, so the config stored nothing. Restore real names and
        # `self.` assignments.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
92
0
"""simple docstring""" def lowerCamelCase__ ( __snake_case = 60_08_51_47_51_43 ) -> int: """simple docstring""" try: _UpperCamelCase = int(__snake_case ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) _UpperCamelCase = 2 _UpperCamelCase = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 _UpperCamelCase = i while n % i == 0: _UpperCamelCase = n // i i += 1 return int(__snake_case ) if __name__ == "__main__": print(F"""{solution() = }""")
19
'''simple docstring'''
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextVaModelTester:
    """Builds small ConvNextV2 configs/inputs and runs per-head model checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # NOTE(fix): these were previously bound to a throwaway local
        # (`lowercase = parent`, ...) instead of instance attributes, so every
        # later `self.batch_size`-style read raised AttributeError.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester suite for ConvNextV2 (no attentions, no embeddings)."""

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # base/backbone models have no loss head to train against
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''').to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.99_96, 0.19_66, -0.43_86]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
92
0
_lowerCAmelCase: dict[str, float] = { "km/h": 1.0, "m/s": 3.6, "mph": 1.609_344, "knot": 1.852, } _lowerCAmelCase: dict[str, float] = { "km/h": 1.0, "m/s": 0.277_777_778, "mph": 0.621_371_192, "knot": 0.539_956_803, } def _lowercase( __a : float , __a : str , __a : str ): if unit_to not in speed_chart or unit_from not in speed_chart_inverse: a__ =( f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n""" f"""Valid values are: {', '.join(__a )}""" ) raise ValueError(__a ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
20
'''simple docstring'''
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True iff the regex sequence ``qs`` matches a contiguous window of ``ks``."""
    # NOTE(fix): the helper functions in this module were all named
    # `_lowerCAmelCase`, while the bodies referenced `_match`, `_unmatched`,
    # `_replacement_rules` and `_get_partition_rules` — every call raised
    # NameError. Restored the referenced names.
    qts = tuple((re.compile(x + '''$''') for x in qs))  # anchor each pattern to a full match
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Return replace(key, val) applying the first matching (rule, replacement) pair."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules mapping GPT-style parameter paths to PartitionSpecs."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''', None)),
        (("transformer", "wte", "embedding"), P('''mp''', None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, '''mp''')),
        (("attention", "out_proj", "kernel"), P('''mp''', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, '''mp''')),
        (("mlp", "c_fc", "bias"), P('''mp''')),
        (("mlp", "c_proj", "kernel"), P('''mp''', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Assign a PartitionSpec to every leaf of ``in_dict`` and return a frozen dict."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
92
0
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxMTaIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Compare the Flax mT5-small cross-entropy against the reference T5X score.

        NOTE(fix): the method was previously named ``A__`` so unittest never
        discovered it, and all locals were bound to ``__magic_name__`` while the
        body read ``model``, ``tokenizer``, ``logits`` etc. (NameError).
        """
        model = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""")
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""")

        input_ids = tokenizer("""Hello there""", return_tensors="""np""").input_ids
        labels = tokenizer("""Hi I am""", return_tensors="""np""").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
21
'''simple docstring'''
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``.

    Marks ``start`` visited and appends it to the module-level ``cuts`` list
    whenever its subtree has an even number of nodes (such a subtree can be
    severed from its parent while keeping both components even).
    Relies on the module-level ``tree`` adjacency map and ``visited`` dict.
    """
    # NOTE(fix): the previous revision bound the count and the visited flag to
    # throwaway `lowercase` locals and never defined `even_tree`, so the script
    # crashed with NameError.
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root (node 1) to populate ``cuts``."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
92
0
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    """argparse hook: build the command object from parsed CLI arguments."""
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    """`transformers-cli add-new-model`: scaffold a new model from the cookiecutter template.

    NOTE(fix): the previous revision bound every intermediate value to the
    throwaway name `_a` (parser, configuration, file paths, ...), so almost
    every later reference (`add_new_model_parser`, `lines`, `line_found`, ...)
    raised NameError. Restored meaningful, consistent bindings throughout.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser('''add-new-model''')
        add_new_model_parser.add_argument('''--testing''', action='''store_true''', help='''If in testing mode.''')
        add_new_model_parser.add_argument('''--testing_file''', type=str, help='''Configuration file on which to run.''')
        add_new_model_parser.add_argument(
            '''--path''', type=str, help='''Path to cookiecutter. Should only be used for testing purposes.'''
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
            '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
            '''checks, you should use `transformers-cli add-new-model-like` instead.'''
        )
        if not _has_cookiecutter:
            raise ImportError(
                '''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
                '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n'''
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                '''Several directories starting with `cookiecutter-template-` in current working directory. '''
                '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
                '''change your working directory.'''
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, '''r''') as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]

        # Retrieve configuration
        with open(directory + '''/configuration.json''', '''r''') as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration['''lowercase_modelname''']
        generate_tensorflow_pytorch_and_flax = configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(F'{directory}/configuration.json')

        output_pytorch = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        output_flax = '''Flax''' in generate_tensorflow_pytorch_and_flax

        model_dir = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}', exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py', '''w'''):
            pass

        shutil.move(
            F'{directory}/__init__.py',
            F'{model_dir}/__init__.py',
        )
        shutil.move(
            F'{directory}/configuration_{lowercase_model_name}.py',
            F'{model_dir}/configuration_{lowercase_model_name}.py',
        )

        def remove_copy_lines(path: str):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(path, '''r''') as f:
                lines = f.readlines()
            with open(path, '''w''') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py')

            shutil.move(
                F'{directory}/modeling_{lowercase_model_name}.py',
                F'{model_dir}/modeling_{lowercase_model_name}.py',
            )
            shutil.move(
                F'{directory}/test_modeling_{lowercase_model_name}.py',
                F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py',
            )
        else:
            os.remove(F'{directory}/modeling_{lowercase_model_name}.py')
            os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py')

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py')

            shutil.move(
                F'{directory}/modeling_tf_{lowercase_model_name}.py',
                F'{model_dir}/modeling_tf_{lowercase_model_name}.py',
            )
            shutil.move(
                F'{directory}/test_modeling_tf_{lowercase_model_name}.py',
                F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py',
            )
        else:
            os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py')
            os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py')

        if output_flax:
            if not self._testing:
                remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py')

            shutil.move(
                F'{directory}/modeling_flax_{lowercase_model_name}.py',
                F'{model_dir}/modeling_flax_{lowercase_model_name}.py',
            )
            shutil.move(
                F'{directory}/test_modeling_flax_{lowercase_model_name}.py',
                F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py',
            )
        else:
            os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py')
            os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py')

        shutil.move(
            F'{directory}/{lowercase_model_name}.md',
            F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md',
        )
        shutil.move(
            F'{directory}/tokenization_{lowercase_model_name}.py',
            F'{model_dir}/tokenization_{lowercase_model_name}.py',
        )
        shutil.move(
            F'{directory}/tokenization_fast_{lowercase_model_name}.py',
            F'{model_dir}/tokenization_{lowercase_model_name}_fast.py',
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Insert `lines_to_copy` right below `line_to_copy_below` in `original_file`,
            # going through a temp file so a failure never truncates the original.
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, '''w''') as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.')

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line: str):
            # Whether this snippet targets a framework the user did not request.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile: str):
            # Parse the `to_replace_*.py` directive file and apply each snippet.
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('''"''')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('''"''')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py')
        os.rmdir(directory)
22
"""Utilities for converting a PyTorch ``state_dict`` into Flax parameters."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging

logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite ``layer.0``-style segments of *key* as ``layer_0`` (Flax naming)."""
    regex_pattern = r"\w+[.]\d+"
    pats = re.findall(regex_pattern, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PT weight key tuple to the matching Flax key and reshape the tensor if needed.

    Args:
        pt_tuple_key: the PyTorch parameter name, split on ``.`` into a tuple.
        pt_tensor: the corresponding numpy weight tensor.
        random_flax_state_dict: flattened randomly-initialized Flax params, used to
            decide which renaming rule applies.

    Returns:
        ``(renamed_key_tuple, possibly_reshaped_tensor)``.
    """
    # conv/group norm stored as "bias"/"weight" in PT but "scale" in Flax
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding: PT "weight" -> Flax "embedding"
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax kernel (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PT stores (out, in); Flax kernel is (in, out), so transpose
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight naming ("gamma" -> "weight")
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias naming ("beta" -> "bias")
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # no rule matched: keep the key as-is
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch ``state_dict`` into a nested Flax parameter dict.

    Args:
        pt_state_dict: mapping of PT parameter names to torch tensors.
        flax_model: a Flax model exposing ``init_weights(rng)``.
        init_key: seed for the PRNG used to initialize reference Flax params.

    Raises:
        ValueError: if a converted weight's shape disagrees with the
            randomly-initialized Flax parameter of the same name.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
92
0
"""Lazy-import ``__init__`` for the HerBERT tokenizers."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

# The fast tokenizer is only importable when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the module is lazy.
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
23
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. 
lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. 
if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
"""Compute per-token occurrence counts over a binarized dataset.

Used for smoothing the masking probabilities in MLM (cf. XLM/word2vec).
"""
import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        # data is expected to be an iterable of token-id sequences — TODO confirm against the dump script.
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense count vector indexed by token id; ids never seen keep count 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
24
"""XNLI metric: simple accuracy over predicted vs. reference labels."""
import datasets

_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis
        and Rinott, Ruty
        and Lample, Guillaume
        and Williams, Adina
        and Bowman, Samuel R.
        and Schwenk, Holger
        and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of positions where preds equal labels (element-wise on arrays)."""
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        """Declare the metric's feature schema and metadata."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Return the accuracy of *predictions* against *references*."""
        return {"accuracy": simple_accuracy(predictions, references)}
92
0
"""Lazy-import ``__init__`` for the CLAP model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

# Modeling and feature-extraction classes require the torch backend.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the module is lazy.
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
25
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , 
n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() 
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : 
int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] 
=model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
# NOTE(review): this chunk is an extraction-damaged copy of the transformers
# AutoProcessor test suite.  An obfuscation pass collapsed distinct identifiers
# into placeholders: all module constants are bound to ``__UpperCamelCase`` (so
# later bindings shadow earlier ones), every local is ``__snake_case`` and every
# call-site argument is ``__magic_name__`` (where names such as ``tmpdirname``,
# ``processor`` or ``Wav2Vec2Processor`` were clearly intended -- later lines
# still read the original names, e.g. ``processor.save_pretrained``).  The code
# is therefore not runnable as-is; it is reproduced with unchanged tokens below
# (reformatted only), with review comments added.
# TODO(review): restore identifiers from the upstream
# ``tests/models/auto/test_processor_auto.py`` before relying on these tests.
'''simple docstring'''

import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402

# NOTE(review): three distinct fixture-path constants were collapsed onto one
# name by the obfuscation -- only the last binding ("fixtures") survives.
__UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__UpperCamelCase = get_tests_dir("fixtures/vocab.json")
__UpperCamelCase = get_tests_dir("fixtures")


class _A ( unittest.TestCase ):
    # Minimal WordPiece vocabulary used to build custom tokenizers on the fly.
    lowercase__: str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']

    def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
        """Per-test setup; the damaged assignment target presumably disabled a
        remote-code timeout -- TODO confirm against upstream."""
        __snake_case : Optional[int] = 0

    def lowercase__ ( self : Optional[int] ) -> Optional[int]:
        """Load a Wav2Vec2 processor from the Hub via AutoProcessor."""
        __snake_case : int = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(__magic_name__ , __magic_name__ )

    def lowercase__ ( self : str ) -> Union[str, Any]:
        """Round-trip a processor through save_pretrained/from_pretrained on disk."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            __snake_case : List[Any] = WavaVecaConfig()
            __snake_case : List[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )

            # save in new folder
            model_config.save_pretrained(__magic_name__ )
            processor.save_pretrained(__magic_name__ )

            __snake_case : Optional[int] = AutoProcessor.from_pretrained(__magic_name__ )

        self.assertIsInstance(__magic_name__ , __magic_name__ )

    def lowercase__ ( self : List[str] ) -> Dict:
        """Load a processor from a local folder assembled out of fixture files."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
            copyfile(__magic_name__ , os.path.join(__magic_name__ , """vocab.json""" ) )

            __snake_case : Optional[Any] = AutoProcessor.from_pretrained(__magic_name__ )

        self.assertIsInstance(__magic_name__ , __magic_name__ )

    def lowercase__ ( self : Dict ) -> Union[str, Any]:
        """Processor still resolves when `processor_class` is removed from the
        tokenizer config."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            __snake_case : Any = WavaVecaFeatureExtractor()
            __snake_case : List[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            __snake_case : Union[str, Any] = WavaVecaProcessor(__magic_name__ , __magic_name__ )

            # save in new folder
            processor.save_pretrained(__magic_name__ )

            # drop `processor_class` in tokenizer
            with open(os.path.join(__magic_name__ , __magic_name__ ) , """r""" ) as f:
                __snake_case : Optional[Any] = json.load(__magic_name__ )
            config_dict.pop("""processor_class""" )
            with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
                f.write(json.dumps(__magic_name__ ) )

            __snake_case : List[str] = AutoProcessor.from_pretrained(__magic_name__ )

        self.assertIsInstance(__magic_name__ , __magic_name__ )

    def lowercase__ ( self : Union[str, Any] ) -> List[str]:
        """Processor still resolves when `processor_class` is removed from the
        feature-extractor config."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            __snake_case : Optional[int] = WavaVecaFeatureExtractor()
            __snake_case : List[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            __snake_case : Optional[Any] = WavaVecaProcessor(__magic_name__ , __magic_name__ )

            # save in new folder
            processor.save_pretrained(__magic_name__ )

            # drop `processor_class` in feature extractor
            with open(os.path.join(__magic_name__ , __magic_name__ ) , """r""" ) as f:
                __snake_case : str = json.load(__magic_name__ )
            config_dict.pop("""processor_class""" )
            with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
                f.write(json.dumps(__magic_name__ ) )

            __snake_case : Union[str, Any] = AutoProcessor.from_pretrained(__magic_name__ )

        self.assertIsInstance(__magic_name__ , __magic_name__ )

    def lowercase__ ( self : str ) -> List[Any]:
        """Processor resolves from `processor_class` stored in the model config
        even when the tokenizer config on disk is empty."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            __snake_case : List[Any] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
            model_config.save_pretrained(__magic_name__ )
            # copy relevant files
            copyfile(__magic_name__ , os.path.join(__magic_name__ , """vocab.json""" ) )
            # create empty sample processor
            with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
                f.write("""{}""" )

            __snake_case : int = AutoProcessor.from_pretrained(__magic_name__ )

        self.assertIsInstance(__magic_name__ , __magic_name__ )

    def lowercase__ ( self : Dict ) -> List[str]:
        """trust_remote_code gating: loading a dynamic processor fails without the
        flag and yields the remote classes with it (fast and slow tokenizer)."""
        with self.assertRaises(__magic_name__ ):
            __snake_case : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )

        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__magic_name__ ):
            __snake_case : Union[str, Any] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )

        __snake_case : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )

        __snake_case : Tuple = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )

        __snake_case : Any = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )

            # Test we can also load the slow version
            __snake_case : List[str] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
            __snake_case : Optional[int] = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )

    def lowercase__ ( self : List[str] ) -> str:
        """Register a custom config/feature-extractor/tokenizer/processor with the
        auto-API, round-trip it through disk, and clean the registries up."""
        try:
            AutoConfig.register("""custom""" , __magic_name__ )
            AutoFeatureExtractor.register(__magic_name__ , __magic_name__ )
            AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
            AutoProcessor.register(__magic_name__ , __magic_name__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__magic_name__ ):
                AutoProcessor.register(__magic_name__ , __magic_name__ )

            # Now that the config is registered, it can be used as any other config with the auto-API
            __snake_case : Union[str, Any] = CustomFeatureExtractor.from_pretrained(__magic_name__ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                __snake_case : str = os.path.join(__magic_name__ , """vocab.txt""" )
                with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
                    vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
                __snake_case : int = CustomTokenizer(__magic_name__ )

            __snake_case : str = CustomProcessor(__magic_name__ , __magic_name__ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(__magic_name__ )
                __snake_case : Dict = AutoProcessor.from_pretrained(__magic_name__ )
                self.assertIsInstance(__magic_name__ , __magic_name__ )
        finally:
            # Always unregister so other tests see pristine auto-API mappings.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def lowercase__ ( self : Any ) -> List[Any]:
        """Locally-registered classes win over Hub code unless trust_remote_code
        explicitly enables the remote versions."""
        # NOTE(review): the three nested classes below were distinct before the
        # obfuscation (upstream: NewFeatureExtractor / NewTokenizer /
        # NewProcessor); here they all collapsed onto ``_A`` and shadow each
        # other -- extraction damage, confirm against upstream.
        class _A ( __lowercase ):
            lowercase__: Optional[Any] = False

        class _A ( __lowercase ):
            lowercase__: int = False

        class _A ( __lowercase ):
            lowercase__: List[Any] = '''AutoFeatureExtractor'''
            lowercase__: Tuple = '''AutoTokenizer'''
            lowercase__: Union[str, Any] = False

        try:
            AutoConfig.register("""custom""" , __magic_name__ )
            AutoFeatureExtractor.register(__magic_name__ , __magic_name__ )
            AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
            AutoProcessor.register(__magic_name__ , __magic_name__ )
            # If remote code is not set, the default is to use local classes.
            __snake_case : Any = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local ones.
            __snake_case : Optional[Any] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub.
            __snake_case : List[Any] = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def lowercase__ ( self : Optional[Any] ) -> List[Any]:
        """Tokenizer-only checkpoints resolve to the tokenizer class."""
        __snake_case : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )

    def lowercase__ ( self : List[str] ) -> str:
        """Image-processor-only checkpoints resolve to the image-processor class."""
        __snake_case : int = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
        self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )


@is_staging_test
class _A ( unittest.TestCase ):
    # Same minimal vocabulary, used for the push-to-hub round-trips below.
    lowercase__: Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']

    @classmethod
    def lowercase__ ( cls : str ) -> List[str]:
        """Log in to the staging Hub with the CI token."""
        __snake_case : Dict = TOKEN
        HfFolder.save_token(__magic_name__ )

    @classmethod
    def lowercase__ ( cls : Dict ) -> str:
        """Best-effort cleanup of the repos created by these tests."""
        try:
            delete_repo(token=cls._token , repo_id="""test-processor""" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
        except HTTPError:
            pass

    def lowercase__ ( self : str ) -> str:
        """Push a processor to a user repo and reload it from the Hub."""
        __snake_case : int = WavaVecaProcessor.from_pretrained(__magic_name__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(__magic_name__ , """test-processor""" ) , push_to_hub=__magic_name__ , use_auth_token=self._token )

            __snake_case : Union[str, Any] = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(__magic_name__ , getattr(new_processor.feature_extractor , __magic_name__ ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )

    def lowercase__ ( self : List[str] ) -> Union[str, Any]:
        """Push a processor to an organization repo and reload it from the Hub."""
        __snake_case : List[str] = WavaVecaProcessor.from_pretrained(__magic_name__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(__magic_name__ , """test-processor-org""" ) , push_to_hub=__magic_name__ , use_auth_token=self._token , organization="""valid_org""" , )

            __snake_case : Tuple = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(__magic_name__ , getattr(new_processor.feature_extractor , __magic_name__ ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )

    def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
        """Push a fully custom (dynamic-code) processor: auto_map fields are
        written, module files are copied, and the Hub reload resolves the
        custom class."""
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        __snake_case : Optional[int] = CustomFeatureExtractor.from_pretrained(__magic_name__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            __snake_case : Optional[Any] = os.path.join(__magic_name__ , """vocab.txt""" )
            with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            __snake_case : Union[str, Any] = CustomTokenizer(__magic_name__ )

        __snake_case : Optional[int] = CustomProcessor(__magic_name__ , __magic_name__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
            __snake_case : str = Repository(__magic_name__ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
            processor.save_pretrained(__magic_name__ )

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map , {
                    """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
                    """AutoProcessor""": """custom_processing.CustomProcessor""",
                } , )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) ) as f:
                __snake_case : Dict = json.load(__magic_name__ )
            self.assertDictEqual(
                tokenizer_config["""auto_map"""] , {
                    """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
                    """AutoProcessor""": """custom_processing.CustomProcessor""",
                } , )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_feature_extraction.py""" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_tokenization.py""" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_processing.py""" ) ) )
            repo.push_to_hub()

        __snake_case : List[str] = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__magic_name__ )
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
26
"""Tests for the PyTorch ALBERT model.

NOTE(review): the original chunk was extraction-damaged -- every ``__init__``
parameter shared one placeholder name (invalid Python) and instance-attribute
assignments had become dead locals while later code read ``self.batch_size``
etc.  Identifiers below are restored from the use sites that survived in the
damaged code (e.g. ``AlbertModelTester(self)``,
``self.model_tester.create_and_check_model``).
"""
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    """Builds a tiny ALBERT config plus random inputs and checks the output
    shapes of every task head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # `parent` is the unittest.TestCase running the checks.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask,
        sequence_labels, token_labels, choice_labels) with random contents."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny AlbertConfig mirroring the tester's hyper-parameters."""
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three supported calling conventions.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile every input along a new `num_choices` dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the damaged source only showed a bare `= True` attribute;
    # upstream this flag is `fx_compatible` -- confirm before merging.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the extra pretraining labels ALBERT needs when testing with labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Check a slice of albert-base-v2 hidden states against known values."""
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
92
0
def find_minimum_change(denominations, value) -> list[int]:
    """Greedy coin change.

    Returns the list of denominations (largest first) whose sum equals
    ``value``.  Greedy selection is optimal for canonical coin systems such as
    the Indian-currency default below; for arbitrary denomination sets it
    returns *a* representation, not necessarily the minimal one.

    Args:
        denominations: iterable of positive denomination values (any order --
            they are sorted here; the original relied on ascending input).
        value: amount to represent; anything accepted by ``int()``.

    Returns:
        List of chosen denominations in descending order; empty for value <= 0.
    """
    total_value = int(value)

    answer = []
    # BUGFIX: sort explicitly instead of `reversed(denominations)`, which was
    # only correct when the caller happened to supply an ascending list.
    for denomination in sorted(denominations, reverse=True):
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(int(denomination))

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
27
"""Nearest-neighbour image rescaling.

NOTE(review): identifiers restored from the extraction-damaged original; the
class name and attribute names are proven by the surviving driver call
``NearestNeighbour(im, dst_w, dst_h)`` and the ``self.img``/``self.src_w``
reads inside ``process``.
"""
import numpy as np


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        """Prepare a nearest-neighbour rescale of ``img`` to dst_width x dst_height.

        Args:
            img: source image as an H x W x 3 array (numpy indexing is all
                that's required).
            dst_width: target width in pixels, must be > 0.
            dst_height: target height in pixels, must be > 0.

        Raises:
            ValueError: if either target dimension is not strictly positive.
        """
        # BUGFIX: the original checked `< 0`, letting 0 through to a
        # ZeroDivisionError below despite the "> 0" error message.
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # Scale factors mapping destination coordinates back to the source.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # Output buffer, initialised to white (255 in every channel).
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill ``self.output`` by sampling the nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column to its nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row to its nearest source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    # cv2 (imported as `cva` here) is only needed by this demo driver; keeping
    # the import local lets the class be imported without OpenCV installed.
    from cva import destroyAllWindows, imread, imshow, waitKey

    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
92
0
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {"vocab_file": "spiece.model"} UpperCamelCase_ = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } UpperCamelCase_ = { "AI-Sweden/gpt-sw3-126m": 2_0_4_8, "AI-Sweden/gpt-sw3-350m": 2_0_4_8, "AI-Sweden/gpt-sw3-1.6b": 2_0_4_8, "AI-Sweden/gpt-sw3-6.7b": 2_0_4_8, "AI-Sweden/gpt-sw3-20b": 2_0_4_8, } class _a ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A : int = VOCAB_FILES_NAMES A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self, A, A=False, A=False, A=False, A=None, A=None, A=None, A=None, A = None, **A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) SCREAMING_SNAKE_CASE : Dict = 'None' # Default definitions 
for our 2 tokenizer versions, with None-checks to enable proper testing SCREAMING_SNAKE_CASE : Optional[Any] = '<|endoftext|>' if eos_token is None else eos_token SCREAMING_SNAKE_CASE : int = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: SCREAMING_SNAKE_CASE : Dict = unk_token if pad_token is None else pad_token SCREAMING_SNAKE_CASE : List[Any] = eos_token if bos_token is None else bos_token else: SCREAMING_SNAKE_CASE : Optional[int] = '<pad>' if pad_token is None else pad_token SCREAMING_SNAKE_CASE : int = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=A, remove_space=A, keep_accents=A, bos_token=A, eos_token=A, unk_token=A, pad_token=A, sp_model_kwargs=self.sp_model_kwargs, **A, ) SCREAMING_SNAKE_CASE : Tuple = do_lower_case SCREAMING_SNAKE_CASE : Optional[Any] = remove_space SCREAMING_SNAKE_CASE : List[Any] = keep_accents SCREAMING_SNAKE_CASE : Tuple = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) # Used for whitespace normalization in input texts # fmt : off SCREAMING_SNAKE_CASE : Any = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing SCREAMING_SNAKE_CASE : str = re.compile( F"[{''.join(map(A, list(range(0, 9 ) ) + list(range(11, 32 ) ) + list(range(127, 160 ) ) + [160, 173, 8_203] ) )}]" ) def __getstate__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.__dict__.copy() SCREAMING_SNAKE_CASE : Optional[int] = None return state def __setstate__( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase_ ( self ): '''simple docstring''' return len(self.sp_model ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.non_printing_characters_re.sub('', A ) # Normalize whitespaces SCREAMING_SNAKE_CASE : Any = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization SCREAMING_SNAKE_CASE : List[str] = unicodedata.normalize('NFC', A ) return text def UpperCamelCase_ ( self, A, **A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.preprocess_text(A ) return self.sp_model.encode(A, out_type=A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.sp_model.PieceToId(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.sp_model.IdToPiece(A ) @staticmethod def UpperCamelCase_ ( A ): '''simple docstring''' return out_string def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : str = '' SCREAMING_SNAKE_CASE : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it 
ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : List[str] = [] else: current_sub_tokens.append(A ) SCREAMING_SNAKE_CASE : int = False out_string += self.sp_model.decode(A ) return out_string def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self, A, A = None ): '''simple docstring''' if not os.path.isdir(A ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE : Any = os.path.join( A, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, A ) elif not os.path.isfile(self.vocab_file ): with open(A, 'wb' ) as fi: SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self, A, A = False ): '''simple docstring''' if isinstance(A, A ): SCREAMING_SNAKE_CASE : Optional[int] = self.preprocess_text(A ) SCREAMING_SNAKE_CASE : Tuple = self.sp_model.encode(A ) else: SCREAMING_SNAKE_CASE : List[Any] = [self.preprocess_text(A ) for t in text] SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.encode(A ) if return_tensors is True or return_tensors == "pt": SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(A ) return token_ids def UpperCamelCase_ ( self, A ): '''simple docstring''' return self.sp_model.decode(A ) def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()] SCREAMING_SNAKE_CASE : Any = 
( F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(A ) + F"{self.bos_token}Bot:" ) return self.encode(text=A )
28
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Any =0.0_0 lowercase : Tuple =0 for resistor in resistors: if resistor <= 0: lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!''' raise ValueError(__magic_name__ ) first_sum += 1 / float(__magic_name__ ) index += 1 return 1 / first_sum def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Optional[Any] =0.0_0 lowercase : int =0 for resistor in resistors: sum_r += resistor if resistor < 0: lowercase : Tuple =f'''Resistor at index {index} has a negative value!''' raise ValueError(__magic_name__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
92
0
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A_ = logging.get_logger(__name__) A_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __lowerCamelCase : a__: str = field( default=lowerCAmelCase , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCAmelCase )} ) a__: str = field( default=lowerCAmelCase , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) a__: int = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) a__: int = field( default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) a__: int = field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) a__: int = field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' 
) } , ) a__: bool = field( default=lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) a__: bool = field( default=lowerCAmelCase , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) a__: float = field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) a__: int = field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) a__: int = field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) a__: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class __lowerCamelCase ( lowerCAmelCase ): a__: int = 'train' a__: Any = 'dev' class __lowerCamelCase ( lowerCAmelCase ): a__: SquadDataTrainingArguments a__: List[SquadFeatures] a__: Split a__: bool def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = Split.train , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = "pt" , ): lowerCamelCase_ = args lowerCamelCase_ = is_language_sensitive lowerCamelCase_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(UpperCAmelCase , UpperCAmelCase ): try: lowerCamelCase_ = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) lowerCamelCase_ = mode # Load data features from cache or dataset file lowerCamelCase_ = '''v2''' if args.version_2_with_negative else '''v1''' lowerCamelCase_ = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCamelCase_ = cached_features_file + '''.lock''' with FileLock(UpperCAmelCase ): if os.path.exists(UpperCAmelCase ) and not args.overwrite_cache: lowerCamelCase_ = time.time() lowerCamelCase_ = torch.load(UpperCAmelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase_ = self.old_features['''features'''] lowerCamelCase_ = self.old_features.get('''dataset''' , UpperCAmelCase ) lowerCamelCase_ = self.old_features.get('''examples''' , UpperCAmelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" ''' future run''' ) else: if mode == Split.dev: lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase_ = self.processor.get_train_examples(args.data_dir ) lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features( examples=self.examples , tokenizer=UpperCAmelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=UpperCAmelCase , ) lowerCamelCase_ = time.time() torch.save( {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , UpperCAmelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self ): return len(self.features ) def __getitem__( self , UpperCAmelCase ): # Convert to Tensors and build dataset lowerCamelCase_ = self.features[i] lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase_ = { '''input_ids''': input_ids, '''attention_mask''': attention_mask, '''token_type_ids''': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} ) if self.args.version_2_with_negative: inputs.update({'''is_impossible''': is_impossible} ) if self.is_language_sensitive: inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} ) return inputs
29
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": 
"""encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def 
_lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized 
from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple 
=WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
92
0
def lowerCamelCase__(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count positive integers that are both an ``n``-digit
    number and an ``n``-th power of some base.

    Every base in ``[1, max_base)`` is raised to every power in
    ``[1, max_power)``; a combination counts when ``base ** power`` has exactly
    ``power`` decimal digits.

    Fix: the original signature declared *both* parameters with the same name
    (``_lowercase``), which is a SyntaxError in Python; the two bounds are now
    distinct, with the original default values preserved.

    >>> lowerCamelCase__(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


# Fix: the __main__ guard below refers to `solution`, which the original file
# never defined; bind it to the implementation above.
solution = lowerCamelCase__

if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
30
'''Flax sinusoidal time-step embeddings for diffusion models.'''
import math

import flax.linen as nn
import jax.numpy as jnp


def _lowerCAmelCase(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal embeddings of shape ``(len(timesteps), embedding_dim)``.

    Fix: the original declared all seven parameters with the same name
    (``__magic_name__``), a SyntaxError, while the body read the names used
    below — the signature is reconstructed from those body references.

    Args:
        timesteps: 1-D array of time-step values to embed.
        embedding_dim: output width; must be even (half sin, half cos).
        freq_shift: offset applied to the timescale denominator.
        min_timescale / max_timescale: geometric range of the frequencies.
        flip_sin_to_cos: if True, emit ``[cos, sin]`` instead of ``[sin, cos]``.
        scale: multiplier applied to the raw angle matrix before sin/cos.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    # Fix: `jnp.floataa` (an obfuscation artifact) restored to `jnp.float32`.
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    # Outer product: one row of angles per timestep.
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


# Fix: the module below calls `get_sinusoidal_embeddings`, which the renaming
# above had removed; keep both names bound to the same function.
get_sinusoidal_embeddings = _lowerCAmelCase


class __SCREAMING_SNAKE_CASE(nn.Module):
    """Time-embedding MLP: Dense -> SiLU -> Dense.

    Fix: the original assigned both field defaults to the single attribute
    ``lowerCamelCase_`` while the body read ``self.time_embed_dim`` and
    ``self.dtype``; the field names are restored from those reads.
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb)
        return temb


class __SCREAMING_SNAKE_CASE(nn.Module):
    """Sinusoidal time-step embedding module.

    NOTE(review): this class deliberately keeps the same name as the MLP above
    (and therefore shadows it), exactly as in the original source where both
    classes carried one obfuscated name. Field names are restored from the
    ``self.dim`` / ``self.flip_sin_to_cos`` / ``self.freq_shift`` reads below.
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps,
            embedding_dim=self.dim,
            flip_sin_to_cos=self.flip_sin_to_cos,
            freq_shift=self.freq_shift,
        )
92
0
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase__ : List[Any] = logging.get_logger(__name__) # General docstring lowerCamelCase__ : Dict = 'RegNetConfig' # Base docstring lowerCamelCase__ : Union[str, Any] = 'facebook/regnet-y-040' lowerCamelCase__ : Dict = [1, 1_088, 7, 7] # Image classification docstring lowerCamelCase__ : List[Any] = 'facebook/regnet-y-040' lowerCamelCase__ : Any = 'tabby, tabby cat' lowerCamelCase__ : List[Any] = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[str] = "relu" , **_lowerCAmelCase : Tuple , ): super().__init__(**_lowerCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb SCREAMING_SNAKE_CASE_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD( filters=_lowerCAmelCase , kernel_size=_lowerCAmelCase , strides=_lowerCAmelCase , padding='VALID' , groups=_lowerCAmelCase , use_bias=_lowerCAmelCase , name='convolution' , ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) 
SCREAMING_SNAKE_CASE_ = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Optional[Any] ): SCREAMING_SNAKE_CASE_ = self.convolution(self.padding(_lowerCAmelCase ) ) SCREAMING_SNAKE_CASE_ = self.normalization(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[int] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : List[Any] ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = config.num_channels SCREAMING_SNAKE_CASE_ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] ): SCREAMING_SNAKE_CASE_ = shape_list(_lowerCAmelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 2, 3, 1) ) SCREAMING_SNAKE_CASE_ = self.embedder(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Dict ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD( filters=_lowerCAmelCase , kernel_size=1 , strides=_lowerCAmelCase , use_bias=_lowerCAmelCase , name='convolution' ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False ): return self.normalization(self.convolution(_lowerCAmelCase ) , training=_lowerCAmelCase ) class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int , **_lowerCAmelCase : Any ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name='pooler' ) SCREAMING_SNAKE_CASE_ = [ tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] SCREAMING_SNAKE_CASE_ = self.pooler(_lowerCAmelCase ) for layer_module in self.attention: SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = hidden_state * pooled return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , 
_lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : str ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1 SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width ) SCREAMING_SNAKE_CASE_ = ( TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. SCREAMING_SNAKE_CASE_ = [ TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( _lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name='layer.2' ), ] SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Dict ): SCREAMING_SNAKE_CASE_ = hidden_state for layer_module in self.layers: SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.shortcut(_lowerCAmelCase ) hidden_state += residual SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : Dict ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1 SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width ) SCREAMING_SNAKE_CASE_ = ( TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) SCREAMING_SNAKE_CASE_ = [ TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , 
activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( _lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(_lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name='layer.3' ), ] SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Dict ): SCREAMING_SNAKE_CASE_ = hidden_state for layer_module in self.layers: SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.shortcut(_lowerCAmelCase ) hidden_state += residual SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Optional[Any] ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer SCREAMING_SNAKE_CASE_ = [ # downsampling is done in the first layer with stride of 2 layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase , name='layers.0' ), *[layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , name=F"layers.{i+1}" ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Optional[int] ): for layer_module in self.layers: SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : str ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample 
the input self.stages.append( TFRegNetStage( _lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) SCREAMING_SNAKE_CASE_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowerCAmelCase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , depth=_lowerCAmelCase , name=F"stages.{i+1}" ) ) def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True ): SCREAMING_SNAKE_CASE_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,) SCREAMING_SNAKE_CASE_ = stage_module(_lowerCAmelCase ) if output_hidden_states: SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase ) @keras_serializable class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' lowercase_ = RegNetConfig def __init__( self : int , _lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = config SCREAMING_SNAKE_CASE_ = TFRegNetEmbeddings(_lowerCAmelCase , name='embedder' ) SCREAMING_SNAKE_CASE_ = TFRegNetEncoder(_lowerCAmelCase , name='encoder' ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name='pooler' ) @unpack_inputs def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , ): SCREAMING_SNAKE_CASE_ = ( output_hidden_states if output_hidden_states is 
not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE_ = self.embedder(_lowerCAmelCase , training=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.encoder( _lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = encoder_outputs[0] SCREAMING_SNAKE_CASE_ = self.pooler(_lowerCAmelCase ) # Change to NCHW output format have uniformity in the modules SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: SCREAMING_SNAKE_CASE_ = tuple([tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_lowerCAmelCase , pooler_output=_lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = RegNetConfig lowercase_ = "regnet" lowercase_ = "pixel_values" @property def lowerCAmelCase_ ( self : List[str] ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCamelCase__ : int = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. 
Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCamelCase__ : Any = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, _SCREAMING_SNAKE_CASE , ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Dict , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Tuple ): super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(_lowerCAmelCase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(_lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : str=False , ): SCREAMING_SNAKE_CASE_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE_ = self.regnet( pixel_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , _SCREAMING_SNAKE_CASE , ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Tuple , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Optional[int] ): super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = config.num_labels SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(_lowerCAmelCase , name='regnet' ) # classification head SCREAMING_SNAKE_CASE_ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict=False , ): SCREAMING_SNAKE_CASE_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE_ = self.regnet( _lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = outputs.pooler_output if return_dict else outputs[1] SCREAMING_SNAKE_CASE_ = self.classifier[0](_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.classifier[1](_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = None if labels is None else self.hf_compute_loss(labels=_lowerCAmelCase , logits=_lowerCAmelCase ) if not return_dict: SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_lowerCAmelCase , 
logits=_lowerCAmelCase , hidden_states=outputs.hidden_states )
31
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO Update this UpperCamelCase_ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'esm' def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =vocab_size lowercase : List[Any] =hidden_size lowercase : Any =num_hidden_layers lowercase : Optional[Any] =num_attention_heads lowercase : Tuple =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Optional[int] =max_position_embeddings lowercase : Union[str, Any] =initializer_range lowercase : Tuple =layer_norm_eps lowercase : Union[str, Any] =position_embedding_type lowercase : List[Any] =use_cache lowercase : Dict =emb_layer_norm_before lowercase : Optional[Any] =token_dropout lowercase : Union[str, Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No 
esmfold_config supplied for folding model, using default values.''' ) lowercase : Any =EsmFoldConfig() elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ ) lowercase : Union[str, Any] =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowercase : int =get_default_vocab_list() else: lowercase : Tuple =vocab_list else: lowercase : Union[str, Any] =None lowercase : Dict =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase__ ): lowercase : Optional[Any] =self.esmfold_config.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = None lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 0 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.trunk is None: lowercase : str =TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase__ ): lowercase : int =TrunkConfig(**self.trunk ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =asdict(self ) lowercase : Union[str, Any] =self.trunk.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 48 lowerCamelCase_ = 10_24 lowerCamelCase_ = 1_28 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = False lowerCamelCase_ = 4 lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' if 
self.structure_module is None: lowercase : Any =StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase__ ): lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase : str =self.sequence_state_dim // self.sequence_head_width lowercase : int =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =asdict(self ) lowercase : Any =self.structure_module.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 3_84 lowerCamelCase_ = 1_28 lowerCamelCase_ = 16 lowerCamelCase_ = 
1_28 lowerCamelCase_ = 12 lowerCamelCase_ = 4 lowerCamelCase_ = 8 lowerCamelCase_ = 0.1 lowerCamelCase_ = 8 lowerCamelCase_ = 1 lowerCamelCase_ = 2 lowerCamelCase_ = 7 lowerCamelCase_ = 10 lowerCamelCase_ = 1E-8 lowerCamelCase_ = 1E5 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return asdict(self ) def _lowerCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
92
0
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated alias of the LayoutLMv2 image processor.

    Kept only for backward compatibility: construction emits a FutureWarning
    and forwards every argument to the image-processor base class.

    NOTE(review): the obfuscated original inherited from the undefined name
    `A__` (the file's own import shows `LayoutLMvaImageProcessor` is the
    intended base) and passed its `*args` tuple where the warning category
    belongs; `FutureWarning` restores the conventional deprecation call.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
32
"""Tests directory-specific settings — run automatically by pytest before any
tests are run.

NOTE(review): reconstructed from an obfuscated block in which all four pytest
hooks were named `_lowerCAmelCase` (each shadowing the previous) with
parameters named `__magic_name__`, while the bodies still read `config`,
`terminalreporter` and `exitstatus`. The hook names and signatures below are
pytest's fixed hook contract; the local names are restored from in-body usage.
"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used across the test suite."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Forward CLI-option registration to the shared transformers helper."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the extended report files when --make-reports was requested."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """OutputChecker that honours the custom IGNORE_RESULT doctest flag."""

    def check_output(self, want, got, optionflags):
        # Any output is accepted when the example is marked IGNORE_RESULT.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Patch doctest/pytest so the whole suite picks up the custom behaviour.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
92
0
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return ``num!`` (the factorial of *num*) for a non-negative integer.

    Results are memoized via ``lru_cache``, so the recursion also warms the
    cache for every smaller value.

    Raises:
        ValueError: if ``num`` is negative.
    """
    # NOTE(review): the obfuscated original named this function
    # SCREAMING_SNAKE_CASE while recursing via `factorial`, and named the
    # parameter __lowerCAmelCase while the body read `num` — both NameErrors
    # at call time. Names restored from the body's own usage.
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


# Backward-compatible alias for the previous (obfuscated) public name.
SCREAMING_SNAKE_CASE = factorial


if __name__ == "__main__":
    import doctest

    doctest.testmod()
33
"""Image processor applying rescaling and symmetric padding to a multiple of
``pad_size`` (the pad-to-multiple-of-8 pattern used by super-resolution
models such as Swin2SR — class name reconstructed, confirm upstream).

NOTE(review): reconstructed from an obfuscated block in which every
assignment target — including the ``self.*`` attribute writes in
``__init__`` and the tuple unpack in ``pad`` — had been replaced by
throwaway names while later code still read the real names
(``self.do_rescale``, ``old_height``, ...). Names restored from those usages.
"""
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    # presumably the standard HF image-processor attribute — TODO confirm
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (delegates to the shared helper)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetrically pad ``image`` on the bottom/right so both spatial
        dimensions become multiples of ``size``."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Validate inputs, then apply (optional) rescale and pad to every
        image and wrap the result in a :class:`BatchFeature`."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
92
0
"""Tests for the LED tokenizer (slow and fast implementations)."""
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case_(TokenizerTesterMixin, unittest.TestCase):
    """Exercises LEDTokenizer / LEDTokenizerFast on a tiny BPE vocabulary.

    Fixes over the previous revision:
    - the mixin base class was an undefined name; the imported (and previously
      unused) ``TokenizerTesterMixin`` is the intended base.
    - every method shared the same mangled name, so later defs shadowed
      earlier ones and unittest discovered nothing; methods now carry the
      ``setUp`` / ``get_*`` / ``test_*`` names the framework expects.
    - ``setUp`` referenced an undefined name where the freshly built vocab and
      merges lists were meant to be used.
    """

    # Hooks read by TokenizerTesterMixin.
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Minimal BPE vocabulary sufficient to tokenize the fixtures below.
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # Without target text, only encoder-side tensors must be produced.
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=3_2, padding="max_length", return_tensors="pt")
            self.assertEqual(3_2, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            # 1024 repetitions exceeds the model window, so truncation must kick in.
            batch = tokenizer(
                ["I am a small frog" * 1_0_2_4, "I am a small frog"],
                padding=True,
                truncation=True,
                return_tensors="pt",
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5_1_2_2))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            # Both encoder input and labels are wrapped in <s> ... </s>.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            # `pad` must extend the user-provided global_attention_mask with -1.
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(
                    tokens_p["input_ids"], [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2]
                )
                self.assertSequenceEqual(
                    tokens_r["input_ids"], [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2]
                )
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
34
"""Lazy import structure for the MBart model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> list of public names it exposes, consumed by _LazyModule.
# Fix: the previous revision rebound every optional group to one throwaway
# variable (overwriting it each time) and then passed an undefined
# `_import_structure` to _LazyModule — a NameError at import time. Build the
# dict incrementally instead.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported when one of their names is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class lowercase(PipelineTool):
    """Donut-based document question answering tool.

    Fixes over the previous revision:
    - the base class was an undefined name; the imported (and previously
      unused) ``PipelineTool`` is the intended base.
    - all class attributes shared one name and all three methods shared
      another, so each definition silently shadowed the previous one; they now
      carry the attribute/method names the ``PipelineTool`` machinery reads
      (``default_checkpoint``, ``encode``/``forward``/``decode``, ...).
    - ``tokenajson`` is not a processor API; ``DonutProcessor.token2json`` is.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build Donut's decoder prompt and the image tensor for one query."""
        # Donut task prompt; the {user_input} placeholder receives the question.
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy-decode the answer token ids."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Convert generated ids back to the answer string."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
35
"""Lazy import structure for the ByT5 tokenizer."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Submodule -> public names mapping consumed by _LazyModule below.
# Fix: the dict was previously bound to a throwaway name while _LazyModule was
# handed an undefined `_import_structure`, raising NameError on import.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byta import ByTaTokenizer
else:
    import sys

    # Replace this module with a lazy proxy; the tokenizer module is only
    # imported when ByT5Tokenizer is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
# Scheduler registry for the diffusers package: import every scheduler whose
# backend is installed, and fall back to the matching dummy placeholder objects
# (which raise an informative error on use) when an optional dependency is
# missing.
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


# PyTorch schedulers — the main group.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

# Flax schedulers.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

# Schedulers that additionally require scipy.
try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

# Schedulers that additionally require torchsde.
try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
36
"""Fine-tune multiple-choice models on SWAG-style tasks with the Trainer API."""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """Fraction of predictions that match the labels (element-wise mean)."""
    # NOTE: was renamed to a mangled identifier while compute_metrics below
    # still called `simple_accuracy`, a NameError at evaluation time.
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer to fine-tune.

    Field names restored: `main` reads `model_args.config_name`,
    `model_args.tokenizer_name`, etc., but every field previously shared one
    mangled name (later fields overwrote earlier ones).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments for the data to train and evaluate on."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        # `fp16` is the TrainingArguments attribute; the previous `fpaa` raised
        # AttributeError.
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: pad to multiples of 8 for tensor-core efficiency under fp16.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
92
0
from ..utils import DummyObject, requires_backends


class A__(metaclass=DummyObject):
    """Placeholder for a pipeline that needs `transformers`, `torch` and `note_seq`.

    Any use raises a clear ImportError via ``requires_backends`` instead of an
    obscure ModuleNotFoundError.

    Fixes over the previous revision:
    - the class declared ``metaclass=A__`` (itself) — a NameError at class
      creation; the imported ``DummyObject`` is the intended metaclass.
    - ``__init__`` declared ``*args`` and ``**kwargs`` with the same mangled
      name, a SyntaxError (duplicate argument).
    - the backends attribute is named ``_backends`` so the ``DummyObject``
      metaclass can read it, and the two classmethods no longer share a name.
    """

    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
37
"""Build a DPR-embedded knowledge dataset plus a Faiss HNSW index for RAG."""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of ``n`` ``character``-separated words."""
    # NOTE: was renamed to a mangled identifier while split_documents below
    # still called `split_text` — a NameError at runtime.
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages of ~100 words, repeating the title."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    # Field names restored: main() and the __main__ guard read csv_path,
    # output_dir and dpr_ctx_encoder_model_name, but every field previously
    # shared one mangled name. Path(__file__) replaces an undefined name.
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
92
0
"""Draw the Sierpinski triangle fractal with turtle graphics.

Usage: python fractals.py <int:depth_for_fractal>
"""
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment p1-p2.

    Fix: the previous revision declared both parameters with the same mangled
    name (a SyntaxError) and so could only average one point with itself.
    """
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw the triangle vertex1-vertex2-vertex3, then recurse on the three
    corner sub-triangles until ``depth`` reaches 0.

    Fix: the four parameters previously shared one mangled name (SyntaxError),
    and the recursive calls referenced `triangle`/`get_mid`, which the mangled
    def names never defined.
    """
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
38
"""Tokenizer tests for the M2M100 ("MaMaaa") checkpoint family.

NOTE(review): identifiers in this module look machine-mangled — many distinct
values are bound to the same names (``UpperCamelCase_``, ``lowerCamelCase_``),
and assignments bind ``lowercase`` while later statements read other names
(``save_dir``, ``tokenizer``, ``vocab_keys`` ...).  Comments below describe the
apparent intent; confirm against the upstream transformers test file.
"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    # Path to a tiny fixture sentencepiece model used to build a toy tokenizer.
    UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

# Language-code token ids in the m2m100 vocabulary (presumably EN and FR codes
# — TODO confirm against the real vocabulary).
UpperCamelCase_ = 128022
UpperCamelCase_ = 128028


@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
    # Tokenizer class under test plus TokenizerTesterMixin feature switches
    # (rust tokenizer availability, save/load of slow tokenizer, spm usage).
    lowerCamelCase_ = MaMaaaTokenizer
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = True

    def lowerCamelCase_ ( self : Dict ):
        """Write a toy vocab + spm model to tmpdirname and save a tokenizer there."""
        super().setUp()
        lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        lowercase : List[Any] =Path(self.tmpdirname )
        save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        # Copy the fixture spm model next to the vocab only once.
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ):
        """Reload the tokenizer saved by setUp, forwarding extra kwargs."""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ):
        """Return (input_text, expected_output_text) for round-trip checks."""
        return (
            "This is a test",
            "This is a test",
        )

    def lowerCamelCase_ ( self : Any ):
        """Token '</s>' should map to id 0 and back."""
        lowercase : Tuple ='''</s>'''
        lowercase : Union[str, Any] =0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : List[str] ):
        """The vocab keys come out in id order; size matches vocab + added tokens."""
        lowercase : List[Any] =self.get_tokenizer()
        lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<s>''' )
        self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )

    @unittest.skip('''Skip this test while all models are still to be uploaded.''' )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Intentionally skipped mixin test."""
        pass

    def lowerCamelCase_ ( self : List[Any] ):
        """Tokenize / ids / detokenize round trip on the toy vocab."""
        lowercase : Union[str, Any] =self.get_tokenizer()
        lowercase : str =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , )
        lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , '''This is a test''' )

    @slow
    def lowerCamelCase_ ( self : List[str] ):
        """Pinned encoding of three sample sentences against the hosted checkpoint."""
        # fmt: off
        lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )


@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests against the hosted facebook/m2m100_418M checkpoint."""
    lowerCamelCase_ = 'facebook/m2m100_418M'
    # Source (English) sentences and their French references.
    lowerCamelCase_ = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    lowerCamelCase_ = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
    ]
    # Expected ids for the first source sentence, prefixed by the language code.
    # fmt: off
    lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    # fmt: on

    @classmethod
    def lowerCamelCase_ ( cls : Optional[Any] ):
        """Download the tokenizer once for the whole class (en -> fr)."""
        lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
        lowercase : Optional[int] =1
        return cls

    def lowerCamelCase_ ( self : List[Any] ):
        """Language codes map to the pinned ids."""
        self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
        self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
        self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
        self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )

    def lowerCamelCase_ ( self : int ):
        """Vocab size, <unk> id, and presence of the language token."""
        lowercase : List[str] =self.tokenizer.get_vocab()
        self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['''<unk>'''] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Encoding the first source sentence yields the pinned token ids."""
        lowercase : List[Any] ='''en'''
        lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : int ):
        """Decoding drops special tokens (language code, eos)."""
        self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
        # fmt: off
        lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
        lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : int ):
        """save_pretrained / from_pretrained round-trips the language-token map."""
        lowercase : Any =tempfile.mkdtemp()
        lowercase : Tuple =self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(UpperCAmelCase__ )
        lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ )
        self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ )

    @require_torch
    def lowerCamelCase_ ( self : List[str] ):
        """Batched src/tgt encoding matches the fairseq reference layout."""
        lowercase : List[str] ='''en'''
        lowercase : int ='''fr'''
        lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' )
        lowercase : str =shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            lowercase : int =batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def lowerCamelCase_ ( self : int ):
        """Changing src_lang updates the prefix token; suffix stays eos."""
        lowercase : Optional[int] ='''mr'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        lowercase : Union[str, Any] ='''zh'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    @require_torch
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Target mode uses the tgt-lang prefix; input mode restores src-lang."""
        lowercase : int ='''mr'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        lowercase : Optional[Any] ='''zh'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )

    @require_torch
    def lowerCamelCase_ ( self : Any ):
        """_build_translation_inputs adds the forced BOS id for the target lang."""
        lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , {
                # en_XX, A, test, EOS
                '''input_ids''': [[128022, 58, 4183, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 128006,
            } , )
92
0
"""Hub utilities (vendored from diffusers): telemetry user-agent construction,
model-card creation, commit-hash extraction, cache migration, and
resolution/download of model weight files from the Hugging Face Hub.

NOTE(review): identifiers look machine-mangled — many distinct module globals
are all bound to ``lowerCAmelCase_``, and function bodies read names (``ua``,
``username``, ``search``, ``token`` ...) other than the ``snake_case_`` they
assign.  Comments describe apparent intent; confirm against the upstream
``diffusers/utils/hub_utils.py``.
"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida

from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    is_jinja_available,
)
from packaging import version
from requests import HTTPError

from .. import __version__
from .constants import (
    DEPRECATED_REVISION_ARGS,
    DIFFUSERS_CACHE,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
)
from .import_utils import (
    ENV_VARS_TRUE_VALUES,
    _flax_version,
    _jax_version,
    _onnxruntime_version,
    _torch_version,
    is_flax_available,
    is_onnx_available,
    is_torch_available,
)
from .logging import get_logger


lowerCAmelCase_ = get_logger(__name__)
# Jinja template used to render README.md model cards.
lowerCAmelCase_ = Path(__file__).parent / '''model_card_template.md'''
# Random per-process id sent with telemetry pings.
lowerCAmelCase_ = uuida().hex
# Opt-out switches read once from the environment.
lowerCAmelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None ):
    """Build the HTTP user-agent string, honouring the telemetry opt-outs."""
    snake_case_ = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += F'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += F'''; jax/{_jax_version}'''
        ua += F'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += F'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    # Caller-supplied extras: a dict becomes "k/v" pairs, a str is appended.
    if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
    elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        ua += "; " + user_agent
    return ua


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
    """Return "org/model_id" (or "user/model_id" derived from the token)."""
    if token is None:
        snake_case_ = HfFolder.get_token()
    if organization is None:
        snake_case_ = whoami(SCREAMING_SNAKE_CASE__ )['''name''']
        return F'''{username}/{model_id}'''
    else:
        return F'''{organization}/{model_id}'''


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Render a model card from training args and save it as README.md.

    Only runs on the main process; requires Jinja for template rendering.
    NOTE(review): the ``adam_betaa=`` keyword appears twice in the call below
    (mangled ``adam_beta1``/``adam_beta2``) — a duplicate-keyword SyntaxError;
    confirm against upstream.
    """
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.'''
        )
    if hasattr(SCREAMING_SNAKE_CASE__ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
        return
    snake_case_ = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , '''hub_token''' ) else None
    snake_case_ = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
    snake_case_ = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''gradient_accumulation_steps''' ) else None
        ) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
    snake_case_ = os.path.join(args.output_dir , '''README.md''' )
    model_card.save(SCREAMING_SNAKE_CASE__ )


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
    """Extract the commit hash from a resolved cache path, if present."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    snake_case_ = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() )
    snake_case_ = re.search(R'''snapshots/([^/]+)/''' , SCREAMING_SNAKE_CASE__ )
    if search is None:
        return None
    snake_case_ = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None


# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_ = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
lowerCAmelCase_ = os.path.join(hf_cache_home, '''diffusers''')


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
    """Move blob files from the old cache layout to the new one, leaving symlinks behind."""
    if new_cache_dir is None:
        snake_case_ = DIFFUSERS_CACHE
    if old_cache_dir is None:
        snake_case_ = old_diffusers_cache
    snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
    snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            snake_case_ = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ )
            new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
            os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            try:
                os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.'''
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


# One-time, import-time migration guarded by a version marker file.
lowerCAmelCase_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    lowerCAmelCase_ = 0
else:
    with open(cache_version_file) as f:
        try:
            lowerCAmelCase_ = int(f.read())
        except ValueError:
            lowerCAmelCase_ = 0

if cache_version < 1:
    lowerCAmelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
            '''existing cached models. This is a one-time operation, you can interrupt it or run it '''
            '''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
        )
        try:
            move_cache()
        except Exception as e:
            lowerCAmelCase_ = '''\n'''.join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
                '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
                '''message and we will do our best to help.'''
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, '''w''') as f:
            f.write('''1''')
    except Exception:
        logger.warning(
            f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
            '''the directory exists and can be written to.'''
        )


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
    """Insert the variant ("fp16", "ema", ...) before the file extension."""
    if variant is not None:
        snake_case_ = weights_name.split('''.''' )
        snake_case_ = splits[:-1] + [variant] + splits[-1:]
        snake_case_ = '''.'''.join(SCREAMING_SNAKE_CASE__ )
    return weights_name


def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , *, SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ):
    """Resolve a weights file: local path/dir first, then the Hub.

    Also handles the deprecated pattern of loading a variant via a branch
    name (``revision='fp16'``) with a deprecation warning and fallback.
    """
    snake_case_ = str(SCREAMING_SNAKE_CASE__ )
    if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
        return pretrained_model_name_or_path
    elif os.path.isdir(SCREAMING_SNAKE_CASE__ ):
        if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
            # Load from a PyTorch checkpoint
            snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
            snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            return model_file
        else:
            raise EnvironmentError(
                F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse('''0.20.0''' )
        ):
            try:
                snake_case_ = hf_hub_download(
                    SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
                warnings.warn(
                    F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , SCREAMING_SNAKE_CASE__ , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}\' so that the correct variant file can be added.''' , SCREAMING_SNAKE_CASE__ , )
        try:
            # 2. Load model file as usual
            snake_case_ = hf_hub_download(
                SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
                '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
                '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
                '''login`.''' )
        except RevisionNotFoundError:
            raise EnvironmentError(
                F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
                '''this model name. Check the model page at '''
                F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
        except EntryNotFoundError:
            raise EnvironmentError(
                F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
        except HTTPError as err:
            raise EnvironmentError(
                F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
        except ValueError:
            raise EnvironmentError(
                F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
                F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
                F''' directory containing a file named {weights_name} or'''
                ''' \nCheckout your internet connection or see how to run the library in'''
                ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
        except EnvironmentError:
            raise EnvironmentError(
                F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
                '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
                F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
                F'''containing a file named {weights_name}''' )
39
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int: try: lowercase : Any =int(__magic_name__ ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) lowercase : Optional[Any] =2 lowercase : Dict =0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 lowercase : Union[str, Any] =i while n % i == 0: lowercase : Optional[int] =n // i i += 1 return int(__magic_name__ ) if __name__ == "__main__": print(f'''{solution() = }''')
92
0
"""Automatic differentiation via dual numbers.

The obfuscated original did not even parse: ``__init__`` declared the same
parameter name twice (SyntaxError), every type check was the nonsensical
``isinstance(x, x)`` (TypeError at runtime), and module code referenced
``Dual`` / ``differentiate`` / ``f`` that the mangled names never defined.
This restores the coherent form those internal references demand.
"""
from math import factorial


class Dual:
    """A dual number ``real + d1*E1 + d2*E2 + ...`` used for forward-mode AD.

    ``rank`` may be an int (number of dual components, all initialised to 1)
    or an explicit list of dual coefficients.
    """

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        """Render as ``real+d1E1+d2E2+...``."""
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """Drop trailing zero dual coefficients."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        # Scalar addition only shifts the real part.
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list (with 1s, matching the original).
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        # Scalar multiplication scales every coefficient.
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Full product: cross terms of dual parts plus real*dual terms.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        # Division by a Dual is not supported.
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the ``order``-th derivative of ``func`` at ``position``."""
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    # The k-th dual coefficient carries f^(k)(x)/k!.
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    return y**2 * y**4


print(differentiate(f, 9, 2))
40
"""Configuration for the Speech2Text2 decoder model (transformers)."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase_ = logging.get_logger(__name__)

# Canonical checkpoint -> hosted config.json.
UpperCamelCase_ = {
    """facebook/s2t-wav2vec2-large-en-de""": (
        """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class __SCREAMING_SNAKE_CASE ( lowercase__ ):
    """Decoder-only config; stores the hyperparameters read by the model.

    NOTE(review): base class name ``lowercase__`` is mangled — presumably
    ``PretrainedConfig`` (imported above); confirm against upstream.
    """

    # model_type tag, keys ignored at inference, and attribute aliases used
    # by the generic configuration machinery.
    lowerCamelCase_ = 'speech_to_text_2'
    lowerCamelCase_ = ['past_key_values']
    lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ):
        """Store decoder hyperparameters and forward token ids to the base class.

        Defaults (in declaration order): vocab_size=10000, decoder_layers=6,
        decoder_ffn_dim=2048, decoder_attention_heads=4, dropout-related
        values, activation, d_model=256, init_std, layerdrop, use_cache,
        token ids, and max_target_positions=1024.
        """
        lowercase : List[str] =vocab_size
        lowercase : Optional[int] =d_model
        lowercase : Optional[Any] =decoder_ffn_dim
        lowercase : Any =decoder_layers
        lowercase : Dict =decoder_attention_heads
        lowercase : List[Any] =dropout
        lowercase : List[Any] =attention_dropout
        lowercase : Any =activation_dropout
        lowercase : Optional[Any] =activation_function
        lowercase : Optional[int] =init_std
        lowercase : Dict =decoder_layerdrop
        lowercase : Optional[int] =use_cache
        lowercase : Optional[Any] =decoder_layers
        lowercase : List[str] =scale_embedding  # scale factor will be sqrt(d_model) if True
        lowercase : str =max_target_positions
        super().__init__(
            pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
92
0
'''Unit tests for diffusers' PNDMScheduler (PRK and PLMS stepping).

NOTE(review): this file is machine-mangled and cannot run as-is:
  * every method is named ``SCREAMING_SNAKE_CASE``, so at class-creation time
    only the last definition survives;
  * the two class attributes share one name (presumably ``scheduler_classes``
    and ``forward_default_kwargs`` originally — TODO confirm upstream);
  * several ``def`` headers repeat a parameter name, which is a SyntaxError;
  * bodies assign to ``__lowercase`` while later statements read the
    descriptive names the original code used (``scheduler``, ``sample``,
    ``output`` ...), so those reads are NameErrors.
The code is preserved byte-for-byte (re-indented only); comments describe the
evident intent of each block.
'''
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


# NOTE(review): base class name ``lowerCamelCase__`` is undefined here;
# presumably ``SchedulerCommonTest`` (imported above) — TODO confirm.
class lowercase_ (lowerCamelCase__ ):
    """PNDMScheduler test case (mangled; see module docstring)."""

    # NOTE(review): both attributes bound to the same name — the second
    # assignment shadows the first.
    SCREAMING_SNAKE_CASE : Tuple = (PNDMScheduler,)
    SCREAMING_SNAKE_CASE : str = (('num_inference_steps', 5_0),)

    # Intent: build a scheduler config dict, overridable via kwargs.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ,**lowercase__ : List[Any] ):
        __lowercase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
        }
        config.update(**lowercase__ )  # NOTE(review): ``config`` undefined — renaming artifact
        return config

    # Intent: check save_config/from_pretrained round-trip produces identical
    # step_prk/step_plms outputs.
    # NOTE(review): positional and ** parameter share the name ``lowercase__``
    # — SyntaxError in the original.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str]=0 ,**lowercase__ : Tuple ):
        __lowercase = dict(self.forward_default_kwargs )
        __lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
        __lowercase = self.dummy_sample
        __lowercase = 0.1 * sample
        __lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            __lowercase = self.get_scheduler_config(**lowercase__ )
            __lowercase = scheduler_class(**lowercase__ )
            scheduler.set_timesteps(lowercase__ )
            # copy over dummy past residuals
            __lowercase = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase__ )
                __lowercase = scheduler_class.from_pretrained(lowercase__ )
                new_scheduler.set_timesteps(lowercase__ )
                # copy over dummy past residuals
                __lowercase = dummy_past_residuals[:]
            __lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
            __lowercase = new_scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            __lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
            __lowercase = new_scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    # Intentionally empty override (common-test hook disabled for PNDM).
    def SCREAMING_SNAKE_CASE ( self : int ):
        pass

    # Intent: same round-trip check as above, but with the default config.
    # NOTE(review): duplicate parameter name again — SyntaxError.
    def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[Any]=0 ,**lowercase__ : Any ):
        __lowercase = dict(self.forward_default_kwargs )
        __lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
        __lowercase = self.dummy_sample
        __lowercase = 0.1 * sample
        __lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            __lowercase = self.get_scheduler_config()
            __lowercase = scheduler_class(**lowercase__ )
            scheduler.set_timesteps(lowercase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            __lowercase = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase__ )
                __lowercase = scheduler_class.from_pretrained(lowercase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowercase__ )
                # copy over dummy past residual (must be after setting timesteps)
                __lowercase = dummy_past_residuals[:]
            __lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
            __lowercase = new_scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            __lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
            __lowercase = new_scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    # Intent: run a full denoising loop (PRK warmup steps then PLMS steps)
    # and return the final sample for the numeric assertions below.
    def SCREAMING_SNAKE_CASE ( self : Any ,**lowercase__ : Dict ):
        __lowercase = self.scheduler_classes[0]
        __lowercase = self.get_scheduler_config(**lowercase__ )
        __lowercase = scheduler_class(**lowercase__ )
        __lowercase = 1_0
        __lowercase = self.dummy_model()
        __lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase__ )
        for i, t in enumerate(scheduler.prk_timesteps ):
            __lowercase = model(lowercase__ ,lowercase__ )
            __lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            __lowercase = model(lowercase__ ,lowercase__ )
            __lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
        return sample

    # Intent: step_prk/step_plms outputs keep the input sample's shape.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        __lowercase = dict(self.forward_default_kwargs )
        __lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
        for scheduler_class in self.scheduler_classes:
            __lowercase = self.get_scheduler_config()
            __lowercase = scheduler_class(**lowercase__ )
            __lowercase = self.dummy_sample
            __lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowercase__ ,'''set_timesteps''' ):
                scheduler.set_timesteps(lowercase__ )
            elif num_inference_steps is not None and not hasattr(lowercase__ ,'''set_timesteps''' ):
                __lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            __lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            __lowercase = dummy_past_residuals[:]
            __lowercase = scheduler.step_prk(lowercase__ ,0 ,lowercase__ ,**lowercase__ ).prev_sample
            __lowercase = scheduler.step_prk(lowercase__ ,1 ,lowercase__ ,**lowercase__ ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_a.shape )
            __lowercase = scheduler.step_plms(lowercase__ ,0 ,lowercase__ ,**lowercase__ ).prev_sample
            __lowercase = scheduler.step_plms(lowercase__ ,1 ,lowercase__ ,**lowercase__ ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_a.shape )

    # Config sweep: number of training timesteps.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=lowercase__ )

    # Config sweep: steps_offset, plus an exact-timesteps check for offset 1.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowercase__ )
        __lowercase = self.scheduler_classes[0]
        __lowercase = self.get_scheduler_config(steps_offset=1 )
        __lowercase = scheduler_class(**lowercase__ )
        scheduler.set_timesteps(1_0 )
        assert torch.equal(
            scheduler.timesteps ,torch.LongTensor(
                [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) ,)

    # Config sweep: beta range endpoints.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=lowercase__ ,beta_end=lowercase__ )

    # Config sweep: beta schedule variants.
    def SCREAMING_SNAKE_CASE ( self : str ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowercase__ )

    # Config sweep: prediction type variants.
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowercase__ )

    # Forward sweep: individual time steps.
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        for t in [1, 5, 1_0]:
            self.check_over_forward(time_step=lowercase__ )

    # Forward sweep: inference-step counts.
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        for t, num_inference_steps in zip([1, 5, 1_0] ,[1_0, 5_0, 1_0_0] ):
            self.check_over_forward(num_inference_steps=lowercase__ )

    # Regression test for a set_timesteps() indexing bug with step counts
    # that are powers of 3.
    def SCREAMING_SNAKE_CASE ( self : int ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        __lowercase = 2_7
        for scheduler_class in self.scheduler_classes:
            __lowercase = self.dummy_sample
            __lowercase = 0.1 * sample
            __lowercase = self.get_scheduler_config()
            __lowercase = scheduler_class(**lowercase__ )
            scheduler.set_timesteps(lowercase__ )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                __lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample

    # Intent: calling step_plms before set_timesteps must raise.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        with self.assertRaises(lowercase__ ):
            __lowercase = self.scheduler_classes[0]
            __lowercase = self.get_scheduler_config()
            __lowercase = scheduler_class(**lowercase__ )
            scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample

    # Numeric regression: default full loop.
    def SCREAMING_SNAKE_CASE ( self : int ):
        __lowercase = self.full_loop()
        __lowercase = torch.sum(torch.abs(lowercase__ ) )
        __lowercase = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3

    # Numeric regression: v_prediction full loop.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        __lowercase = self.full_loop(prediction_type='''v_prediction''' )
        __lowercase = torch.sum(torch.abs(lowercase__ ) )
        __lowercase = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
        assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3

    # Numeric regression: custom beta_start / set_alpha_to_one.
    def SCREAMING_SNAKE_CASE ( self : int ):
        # We specify different beta, so that the first alpha is 0.99
        __lowercase = self.full_loop(set_alpha_to_one=lowercase__ ,beta_start=0.0_1 )
        __lowercase = torch.sum(torch.abs(lowercase__ ) )
        __lowercase = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
        assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3

    # Numeric regression: same config, second expected value set.
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        # We specify different beta, so that the first alpha is 0.99
        __lowercase = self.full_loop(set_alpha_to_one=lowercase__ ,beta_start=0.0_1 )
        __lowercase = torch.sum(torch.abs(lowercase__ ) )
        __lowercase = torch.mean(torch.abs(lowercase__ ) )
        assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
        assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
41
'''Unit tests for the ConvNextV2 model family (model, classifier, backbone).

NOTE(review): this file is machine-mangled and cannot run as-is:
  * ``def`` headers repeat the parameter name ``UpperCAmelCase__`` — a
    SyntaxError;
  * methods and class attributes all share the name ``lowerCamelCase_``, so
    later definitions shadow earlier ones;
  * two distinct classes are both named ``__SCREAMING_SNAKE_CASE``;
  * bodies assign to ``lowercase`` but later statements read the descriptive
    names (``parent``, ``model``, ``result`` ...) the original code used.
The code is preserved byte-for-byte (re-indented only); comments describe the
evident intent.
'''
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# Model-tester helper: builds tiny configs/inputs and runs shape checks.
class __SCREAMING_SNAKE_CASE :
    # NOTE(review): all parameters after ``self`` share one name — SyntaxError.
    def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ):
        '''Stores the tiny-model hyperparameters used by the tests below.'''
        lowercase : List[Any] =parent
        lowercase : Tuple =batch_size
        lowercase : List[str] =image_size
        lowercase : List[Any] =num_channels
        lowercase : Union[str, Any] =num_stages
        lowercase : int =hidden_sizes
        lowercase : Any =depths
        lowercase : Tuple =is_training
        lowercase : str =use_labels
        lowercase : List[Any] =intermediate_size
        lowercase : int =hidden_act
        lowercase : Union[str, Any] =num_labels
        lowercase : Optional[int] =initializer_range
        lowercase : int =out_features
        lowercase : List[str] =out_indices
        lowercase : str =scope

    # Intent: random pixel_values (+ optional labels) and a config.
    def lowerCamelCase_ ( self : int ):
        '''Builds (config, pixel_values, labels) for a test run.'''
        lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase : Dict =None
        if self.use_labels:
            lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels )
        lowercase : Dict =self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self : Any ):
        '''Builds a ConvNextVaConfig from the stored hyperparameters.'''
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    # Intent: base-model forward produces the expected last-hidden-state shape.
    def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ):
        '''Checks ConvNextVaModel output shape.'''
        lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        lowercase : Optional[Any] =model(UpperCAmelCase__ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    # Intent: classification head yields (batch, num_labels) logits.
    def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
        '''Checks ConvNextVaForImageClassification logits shape.'''
        lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Intent: backbone feature maps/channels match out_features, with and
    # without an explicit out_features list.
    def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
        '''Checks ConvNextVaBackbone feature maps and channel counts.'''
        lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        lowercase : Optional[int] =model(UpperCAmelCase__ )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowercase : Optional[Any] =None
        lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        lowercase : Optional[Any] =model(UpperCAmelCase__ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    # Intent: config + inputs dict without labels.
    def lowerCamelCase_ ( self : List[Any] ):
        '''Returns (config, {"pixel_values": ...}).'''
        lowercase : Any =self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase : str =config_and_inputs
        lowercase : Any ={'''pixel_values''': pixel_values}
        return config, inputs_dict

    # Intent: config + inputs dict including labels.
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''Returns (config, {"pixel_values": ..., "labels": ...}).'''
        lowercase : str =self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase : List[str] =config_and_inputs
        lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels}
        return config, inputs_dict


# Main unit-test class.
# NOTE(review): base names ``lowercase__`` are undefined; presumably
# ModelTesterMixin and PipelineTesterMixin (imported above) — TODO confirm.
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
    # NOTE(review): all seven attributes share one name; each assignment
    # shadows the previous one.
    lowerCamelCase_ = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase_ = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False

    # Intent: setUp — create the model tester and config tester.
    def lowerCamelCase_ ( self : Optional[int] ):
        '''Creates the shared model/config testers.'''
        lowercase : Dict =ConvNextVaModelTester(self )
        lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )

    # Intent: run the standard config serialization checks.
    def lowerCamelCase_ ( self : Optional[Any] ):
        '''Runs the common ConfigTester battery.'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self : Any ):
        '''Intentional no-op hook.'''
        return

    @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def lowerCamelCase_ ( self : Optional[int] ):
        '''Skipped: not applicable to ConvNextV2.'''
        pass

    @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def lowerCamelCase_ ( self : List[str] ):
        '''Skipped: not applicable to ConvNextV2.'''
        pass

    @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def lowerCamelCase_ ( self : Tuple ):
        '''Skipped: not applicable to ConvNextV2.'''
        pass

    # Intent: training forward+backward runs without error.
    def lowerCamelCase_ ( self : List[Any] ):
        '''Checks loss.backward() works in train mode.'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels()
            lowercase : Optional[int] =True
            if model_class.__name__ in [
                *get_values(UpperCAmelCase__ ),
                *get_values(UpperCAmelCase__ ),
            ]:
                continue
            lowercase : Dict =model_class(UpperCAmelCase__ )
            model.to(UpperCAmelCase__ )
            model.train()
            lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
            lowercase : List[Any] =model(**UpperCAmelCase__ ).loss
            loss.backward()

    # Intent: same as above, with gradient checkpointing enabled.
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''Checks training with gradient checkpointing.'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels()
            lowercase : List[Any] =False
            lowercase : Any =True
            if (
                model_class.__name__ in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            lowercase : Any =model_class(UpperCAmelCase__ )
            model.to(UpperCAmelCase__ )
            model.gradient_checkpointing_enable()
            model.train()
            lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
            lowercase : int =model(**UpperCAmelCase__ ).loss
            loss.backward()

    # Intent: forward() signature starts with ``pixel_values``.
    def lowerCamelCase_ ( self : Tuple ):
        '''Checks the first forward argument name.'''
        lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase : Dict =model_class(UpperCAmelCase__ )
            lowercase : Union[str, Any] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase : int =[*signature.parameters.keys()]
            lowercase : Optional[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : Optional[Any] ):
        '''Runs the base-model shape check.'''
        lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    # Intent: hidden-states count/shape check, via kwarg and via config.
    def lowerCamelCase_ ( self : int ):
        '''Checks hidden_states outputs.'''
        def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ):
            lowercase : int =model_class(UpperCAmelCase__ )
            model.to(UpperCAmelCase__ )
            model.eval()
            with torch.no_grad():
                lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
            lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase : List[Any] =self.model_tester.num_stages
            self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase : List[str] =True
            check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase : Tuple =True
            check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : Dict ):
        '''Runs the classification-head shape check.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )

    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        '''Checks the first pretrained checkpoint loads.'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )


# Intent: load the standard COCO fixture image for the integration test.
def _lowerCAmelCase ( ) -> List[Any]:
    lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image  # NOTE(review): ``image`` is undefined — renaming artifact


# Integration test against the released facebook/convnextv2-tiny-1k-224 checkpoint.
# NOTE(review): reuses the class name of the classes above — shadows them.
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    @cached_property
    def lowerCamelCase_ ( self : List[str] ):
        '''Image processor for the pretrained checkpoint (None without vision deps).'''
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None

    @slow
    def lowerCamelCase_ ( self : Optional[Any] ):
        '''End-to-end logits check on the fixture image.'''
        lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ )
        lowercase : int =self.default_image_processor
        lowercase : List[str] =prepare_img()
        lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # forward pass
        with torch.no_grad():
            lowercase : Dict =model(**UpperCAmelCase__ )
        # verify the logits
        lowercase : Optional[Any] =torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
'''Lazy import structure for the GroupViT model package.

Heavy torch/TF submodules are imported only on first attribute access via
``_LazyModule``.
'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map of submodule name -> public names it exports.
# NOTE(review): the previous (machine-mangled) text assigned this dict and the
# two model lists below all to the same name ``A_``, so ``_import_structure``
# — referenced by ``_LazyModule`` at the bottom — was never defined and the
# module raised NameError on import.  Restored the standard pattern.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch implementations are registered only when torch is installed.
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow implementations are registered only when TF is installed.
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # NOTE(review): kept as in the original; the canonical pattern assigns to
    # ``sys.modules[__name__]`` so the module itself becomes lazy — confirm
    # before changing, as ``A_`` may be read elsewhere.
    A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
42
'''Build pjit partition specs for the flattened parameters of a GPT-2-style Flax model.

NOTE(review): the previous (machine-mangled) text declared both parameters of
``_match`` (and of the inner ``replace``) with the same name — a SyntaxError —
and named all four functions identically while the call sites referenced
``_match``, ``_replacement_rules``, ``_get_partition_rules`` and the
``_unmatched`` sentinel.  Names are restored from those call sites.
'''
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinel marking a parameter whose partition spec has not been assigned yet.
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regex tuple ``qs`` matches a contiguous window of ``ks``.

    qs: tuple of regex strings; ks: tuple of key-path components.
    """
    # compile regexes and force complete match (anchor with '$')
    qts = tuple((re.compile(x + '''$''' ) for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Return ``replace(key, val)`` yielding the first matching rule's spec, else ``val``."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """(key-path regex tuple, PartitionSpec-or-None) pairs for each weight group."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''', None)),
        (("transformer", "wte", "embedding"), P('''mp''', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, '''mp''')),
        (("attention", "out_proj", "kernel"), P('''mp''', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, '''mp''')),
        (("mlp", "c_fc", "bias"), P('''mp''')),
        (("mlp", "c_proj", "kernel"), P('''mp''', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Map every flattened parameter path in ``in_dict`` to its PartitionSpec.

    Raises AssertionError if any parameter matched no rule; returns a frozen
    nested dict mirroring ``in_dict``.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
92
0
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase = float('nan') class _a : def __init__( self: Tuple , UpperCamelCase_: List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = sys.stdout lowercase__ = open(UpperCamelCase_ , '''a''' ) def __getattr__( self: List[str] , UpperCamelCase_: str ) -> Optional[Any]: """simple docstring""" return getattr(self.stdout , UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict , UpperCamelCase_: List[str] ) -> str: """simple docstring""" self.stdout.write(UpperCamelCase_ ) # strip tqdm codes self.file.write(re.sub(r'''^.*\r''' , '''''' , UpperCamelCase_ , 0 , re.M ) ) def _a ( SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=False ): """simple docstring""" lowercase__ = [] # deal with critical env vars lowercase__ = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: lowercase__ = os.environ.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if val is not None: cmd.append(f'{key}={val}' ) # python executable (not always needed if the script is executable) lowercase__ = sys.executable if 
full_python_path else sys.executable.split('''/''' )[-1] cmd.append(SCREAMING_SNAKE_CASE ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes lowercase__ = [] lowercase__ = '''''' while len(SCREAMING_SNAKE_CASE ) > 0: current_line += f'{cmd.pop(0 )} ' if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(SCREAMING_SNAKE_CASE ) lowercase__ = '''''' return "\\\n".join(SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd ) # remove --output_dir if any and set our own lowercase__ = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd ) args.base_cmd += f' --output_dir {output_dir}' # ensure we have --overwrite_output_dir lowercase__ = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , ) lowercase__ = subprocess.run(SCREAMING_SNAKE_CASE , capture_output=SCREAMING_SNAKE_CASE , text=SCREAMING_SNAKE_CASE ) if verbose: print('''STDOUT''' , result.stdout ) print('''STDERR''' , result.stderr ) # save the streams lowercase__ = variation.replace(''' ''' , '''-''' ) with open(Path(SCREAMING_SNAKE_CASE ) / f'log.{prefix}.stdout.txt' , '''w''' ) as f: f.write(result.stdout ) with open(Path(SCREAMING_SNAKE_CASE ) / f'log.{prefix}.stderr.txt' , '''w''' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: 
print('''failed''' ) return {target_metric_key: nan} with io.open(f'{output_dir}/all_results.json' , '''r''' , encoding='''utf-8''' ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ): """simple docstring""" lowercase__ = [] lowercase__ = [] lowercase__ = f'{id}: {variation:<{longest_variation_len}}' lowercase__ = f'{preamble}: ' lowercase__ = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(SCREAMING_SNAKE_CASE ) , desc=SCREAMING_SNAKE_CASE , leave=SCREAMING_SNAKE_CASE ): lowercase__ = process_run_single( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ = single_run_metrics[target_metric_key] if not math.isnan(SCREAMING_SNAKE_CASE ): metrics.append(SCREAMING_SNAKE_CASE ) results.append(SCREAMING_SNAKE_CASE ) outcome += "✓" else: outcome += "✘" lowercase__ = f'\33[2K\r{outcome}' if len(SCREAMING_SNAKE_CASE ) > 0: lowercase__ = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} lowercase__ = round(mean_metrics[target_metric_key] , 2 ) lowercase__ = f'{outcome} {mean_target}' if len(SCREAMING_SNAKE_CASE ) > 1: results_str += f' {tuple(round(SCREAMING_SNAKE_CASE , 2 ) for x in results )}' print(SCREAMING_SNAKE_CASE ) lowercase__ = variation return mean_metrics else: print(SCREAMING_SNAKE_CASE ) return {variation_key: variation, target_metric_key: nan} def _a ( ): """simple docstring""" lowercase__ = torch.cuda.get_device_properties(torch.device('''cuda''' ) ) return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: 
{transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = pd.DataFrame(SCREAMING_SNAKE_CASE ) lowercase__ = '''variation''' lowercase__ = '''diff_%''' lowercase__ = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan lowercase__ = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(SCREAMING_SNAKE_CASE ): # as a fallback, use the minimal value as the sentinel lowercase__ = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(SCREAMING_SNAKE_CASE ): lowercase__ = df.apply( lambda SCREAMING_SNAKE_CASE : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='''columns''' , ) # re-order columns lowercase__ = [variation_key, target_metric_key, diff_key, *report_metric_keys] lowercase__ = df.reindex(SCREAMING_SNAKE_CASE , axis='''columns''' ) # reorder cols # capitalize lowercase__ = df.rename(str.capitalize , axis='''columns''' ) # make the cols as narrow as possible lowercase__ = df.rename(lambda SCREAMING_SNAKE_CASE : c.replace('''_''' , '''<br>''' ) , axis='''columns''' ) lowercase__ = df.rename(lambda SCREAMING_SNAKE_CASE : c.replace('''_''' , '''\n''' ) , axis='''columns''' ) lowercase__ = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=SCREAMING_SNAKE_CASE , floatfmt='''.2f''' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark 
command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=SCREAMING_SNAKE_CASE , floatfmt='''.2f''' )] print('''\n\n'''.join(SCREAMING_SNAKE_CASE ) ) def _a ( ): """simple docstring""" lowercase__ = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Base cmd''' , ) parser.add_argument( '''--variations''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , nargs='''+''' , required=SCREAMING_SNAKE_CASE , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , ) parser.add_argument( '''--base-variation''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , ) parser.add_argument( '''--target-metric-key''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , ) parser.add_argument( '''--report-metric-keys''' , default='''''' , type=SCREAMING_SNAKE_CASE , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples''' , ) parser.add_argument( '''--repeat-times''' , default=1 , type=SCREAMING_SNAKE_CASE , help='''How many times to re-run each variation - an average will be reported''' , ) parser.add_argument( '''--output_dir''' , default='''output_benchmark''' , type=SCREAMING_SNAKE_CASE , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , ) parser.add_argument( '''--verbose''' , default=SCREAMING_SNAKE_CASE , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , ) lowercase__ = parser.parse_args() lowercase__ = args.output_dir Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) lowercase__ = get_base_command(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # split each dimension into its --foo variations lowercase__ = [list(map(str.strip , re.split(R'''\|''' , SCREAMING_SNAKE_CASE ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty lowercase__ = list(map(str.strip , map(''' '''.join , itertools.product(*SCREAMING_SNAKE_CASE ) ) ) ) lowercase__ = max(len(SCREAMING_SNAKE_CASE ) for x in variations ) # split wanted keys lowercase__ = args.report_metric_keys.split() # capture prints into a log file for convenience lowercase__ = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(f'and this script\'s output is also piped into {report_fn}' ) lowercase__ = Tee(SCREAMING_SNAKE_CASE ) print(f'\n*** Running {len(SCREAMING_SNAKE_CASE )} benchmarks:' ) print(f'Base command: {" ".join(SCREAMING_SNAKE_CASE )}' ) lowercase__ = '''variation''' lowercase__ = [] for id, variation in 
enumerate(tqdm(SCREAMING_SNAKE_CASE , desc='''Total completion: ''' , leave=SCREAMING_SNAKE_CASE ) ): lowercase__ = base_cmd + variation.split() results.append( process_run( id + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , args.target_metric_key , SCREAMING_SNAKE_CASE , args.repeat_times , SCREAMING_SNAKE_CASE , args.verbose , ) ) process_results(SCREAMING_SNAKE_CASE , args.target_metric_key , SCREAMING_SNAKE_CASE , args.base_variation , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
43
"""Even Tree problem: count the maximum number of edges that can be removed
from a tree (rooted at node 1) so that every remaining component has an even
number of nodes.  An edge (parent, v) is removable exactly when the subtree
rooted at v has even size.
"""
from collections import defaultdict

# Shared state used by dfs()/even_tree(): adjacency list, visit marks, and the
# roots of even-sized subtrees (each corresponds to one removable edge).
tree = defaultdict(list)
visited = {}
cuts = []


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`.

    Side effect: appends `start` to `cuts` whenever that subtree has an even
    number of nodes (i.e. the edge above `start` can be cut).
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the DFS from the root (node 1), populating `cuts`."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root itself always ends up in `cuts` (whole tree has even size),
    # but the root has no edge above it, hence the -1.
    print(len(cuts) - 1)
92
0
"""Divide-and-conquer closest pair of points in the plane (O(n log n))."""


def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points (avoids sqrt)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return `array` sorted by the given coordinate (0 = x, 1 = y)."""
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: minimum squared distance among the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance inside a vertical strip.

    Each point only needs to be compared with at most 6 neighbours in
    y-order, hence the `i - 6` window.
    """
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Recursive helper: minimum squared distance among `points_counts` points."""
    # Base case: brute force small inputs.
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # Points closer to the dividing vertical line than the best distance so
    # far may form a cross-boundary closest pair.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the (non-squared) distance of the closest pair in `points`."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
44
"""Utilities to convert a PyTorch state dict into Flax parameters."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite list-style PyTorch names: ``layers.0`` -> ``layers_0``."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PT weight key to its Flax counterpart, reshaping if needed.

    Returns the (possibly renamed) tuple key and the (possibly transposed)
    tensor.  `random_flax_state_dict` is the flattened randomly-initialized
    Flax state dict used to decide which rename applies.
    """
    # conv norm or layer norm: PT "bias" that only exists as Flax "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert `pt_state_dict` to a nested Flax parameter dict for `flax_model`."""
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
92
0
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / sum(1/r_i).

    Raises:
        ValueError: if any resistor value is negative or zero (a zero value
            would divide by zero and is physically meaningless here).
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: sum(r_i).

    Raises:
        ValueError: if any resistor value is negative.
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. 
lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. 
if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
"""Classic FizzBuzz rendered as a single space-separated string."""


def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`.

    Each value is followed by a space, e.g. fizz_buzz(1, 7) returns
    "1 2 Fizz 4 Buzz Fizz 7 ".

    Raises:
        ValueError: if `iterations` is not an int, if `number` is not a
            positive int, or if `iterations` < 1.
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0"
        )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # Multiples of neither 3 nor 5 keep their numeric form.
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
46
"""XNLI benchmark metric: plain classification accuracy."""
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis
        and Rinott, Ruty
        and Lample, Guillaume
        and Williams, Adina
        and Bowman, Samuel R.
        and Schwenk, Holger
        and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    # Element-wise comparison of numpy arrays, averaged into a scalar.
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        """Describe the metric: inputs are int64 labels (float32 for sts-b)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Return {"accuracy": fraction of predictions equal to references}."""
        return {"accuracy": simple_accuracy(predictions, references)}
92
0
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , 
n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() 
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : 
int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] 
=model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
"""Lazy-loading public API for the Conditional DETR model family.

Submodules are only imported when one of their exported names is first
accessed; optional backends (vision, torch) gate their own exports.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Map of submodule name -> list of public names it exports.
# Consumed by _LazyModule at the bottom of this file; extended below as
# optional backends are detected.
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision backend present: expose the image pre-processing helpers.
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch backend present: expose the modeling classes.
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
48
"""PyTorch ALBERT model tests: tiny-config model tester, common-test harness,
and a slow integration check against the released ``albert-base-v2`` weights."""
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    """Builds a tiny ALBERT config plus random inputs and runs per-head shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels) with randomly generated tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build the tiny AlbertConfig used by all checks."""
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three supported call signatures.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate every input once per choice: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy label tensors for pretraining heads when labels are requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
92
0
"""Reusable harness exercising the common contract of a model configuration class
(serialization round-trips, shared properties, num_labels handling, common kwargs)."""
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester(object):
    """Runs a battery of generic checks against a single config class.

    `parent` is the unittest.TestCase whose assertions are used; extra kwargs
    are passed verbatim to every config instantiation.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """to_json_string must faithfully serialize every kwarg we passed in."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """Round-trip through to_json_file / from_json_file must be lossless."""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """Round-trip through save_pretrained / from_pretrained must be lossless."""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """Same round-trip but loading via the `subfolder` keyword."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """id2label / label2id must track num_labels at init time and on reassignment."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            # Composite configs require their sub-configs; skip the no-arg check.
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Every common kwarg must survive __init__ with its non-default value."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    # config_common_kwargs pins torch_dtype to float16.
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
49
"""Nearest-neighbour image resizing (no interpolation)."""
import numpy as np


class NearestNeighbour:
    """Resize an image to (dst_width, dst_height) by nearest-neighbour sampling.

    The image is expected as an (H, W, 3) uint8 array; the resized result is
    stored in ``self.output`` after calling :meth:`process`.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # Source-to-destination scale factors along each axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # Output buffer, initialised to white (255 in every channel).
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill the output buffer: each destination pixel copies its nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column to its nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row to its nearest source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    # OpenCV is only needed for the interactive demo, so import it lazily here;
    # this keeps the module importable (e.g. for unit tests) without cv2.
    from cva import destroyAllWindows, imread, imshow, waitKey

    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
92
0
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase : Tuple = 16 UpperCamelCase : List[Any] = 32 def A__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : int = 16 ): lowerCamelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCamelCase__ = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__lowerCAmelCase : str ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with 
accelerator.main_process_first(): lowerCamelCase__ = datasets.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase__ = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__lowerCAmelCase : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase__ = 16 elif accelerator.mixed_precision != "no": lowerCamelCase__ = 8 else: lowerCamelCase__ = None return tokenizer.pad( __lowerCAmelCase , padding="""longest""" , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCamelCase__ = DataLoader( tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) lowerCamelCase__ = DataLoader( tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCAmelCase ) == "1": lowerCamelCase__ = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowerCamelCase__ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: lowerCamelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase__ = config["""lr"""] lowerCamelCase__ = int(config["""num_epochs"""] ) lowerCamelCase__ = int(config["""seed"""] ) lowerCamelCase__ = int(config["""batch_size"""] ) set_seed(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation lowerCamelCase__ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCamelCase__ = batch_size // MAX_GPU_BATCH_SIZE lowerCamelCase__ = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the 
model here so that the seed also control new weights initialization) lowerCamelCase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase__ = model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase__ = AdamW(params=model.parameters() , lr=__lowerCAmelCase ) # Instantiate scheduler lowerCamelCase__ = get_linear_schedule_with_warmup( optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: lowerCamelCase__ = os.path.split(__lowerCAmelCase )[-1].split(""".""" )[0] accelerator.init_trackers(__lowerCAmelCase , __lowerCAmelCase ) # Now we train the model for epoch in range(__lowerCAmelCase ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowerCamelCase__ = 0 for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowerCamelCase__ = model(**__lowerCAmelCase ) lowerCamelCase__ = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowerCamelCase__ = loss / gradient_accumulation_steps accelerator.backward(__lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase__ = model(**__lowerCAmelCase ) lowerCamelCase__ = outputs.logits.argmax(dim=-1 ) lowerCamelCase__ , lowerCamelCase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__lowerCAmelCase , references=__lowerCAmelCase , ) lowerCamelCase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { """accuracy""": eval_metric["""accuracy"""], """f1""": eval_metric["""f1"""], """train_loss""": total_loss.item() / len(__lowerCAmelCase ), """epoch""": epoch, } , step=__lowerCAmelCase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def A__ ( ): lowerCamelCase__ = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=__lowerCAmelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
50
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Any =0.0_0 lowercase : Tuple =0 for resistor in resistors: if resistor <= 0: lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!''' raise ValueError(__magic_name__ ) first_sum += 1 / float(__magic_name__ ) index += 1 return 1 / first_sum def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Optional[Any] =0.0_0 lowercase : int =0 for resistor in resistors: sum_r += resistor if resistor < 0: lowercase : Tuple =f'''Resistor at index {index} has a negative value!''' raise ValueError(__magic_name__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
92
0
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' _lowerCamelCase =MgpstrTokenizer _lowerCamelCase =False _lowerCamelCase ={} _lowerCamelCase =False def __snake_case ( self : str ): super().setUp() # fmt: off UpperCAmelCase = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on UpperCAmelCase = dict(zip(a__ , range(len(a__ ) ) ) ) UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a__ ) + '''\n''' ) def __snake_case ( self : Optional[Any] , **a__ : Dict ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a__ ) def __snake_case ( self : List[str] , a__ : Dict ): UpperCAmelCase = '''tester''' UpperCAmelCase = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def __snake_case ( self : Any ): pass def __snake_case ( self : Optional[Any] ): UpperCAmelCase = self.get_tokenizers(do_lower_case=a__ ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} ) UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=a__ ) self.assertEqual(len(a__ ) , 1 ) UpperCAmelCase = tokenizer.decode(a__ , 
skip_special_tokens=a__ ) self.assertTrue(special_token not in decoded ) def __snake_case ( self : Optional[Any] ): UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase, UpperCAmelCase = self.get_input_output_texts(a__ ) UpperCAmelCase = tokenizer.tokenize(a__ ) UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ ) UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ ) self.assertNotEqual(len(a__ ) , 0 ) UpperCAmelCase = tokenizer.decode(a__ ) self.assertIsInstance(a__ , a__ ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , a__ ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def __snake_case ( self : Any ): pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def __snake_case ( self : Tuple ): pass
51
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": 
"""encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def 
_lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized 
from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple 
=WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
92
0
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins A = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def __A ( a_ :Dict , a_ :Optional[Any]) -> Optional[Any]: # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit''']): continue item.add_marker(pytest.mark.unit) def __A ( a_ :Dict) -> Union[str, Any]: config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''') @pytest.fixture(autouse=a_) def __A ( a_ :Union[str, Any] , a_ :int) -> Any: # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? __a : List[str] = tmp_path_factory.getbasetemp() / '''cache''' __a : Union[str, Any] = test_hf_cache_home / '''datasets''' __a : Tuple = test_hf_cache_home / '''metrics''' __a : Tuple = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(a_)) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(a_)) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(a_)) __a : List[str] = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(a_)) __a : str = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(a_)) @pytest.fixture(autouse=a_ , scope='''session''') def __A ( ) -> Optional[int]: datasets.disable_progress_bar() @pytest.fixture(autouse=a_) def __A ( a_ :int) -> Any: # don't take tests into account when counting downloads monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , a_) @pytest.fixture def __A ( a_ :Optional[int]) -> Optional[int]: # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed 
once SQLAlchemy 2.0 supported monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , a_)
52
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray: assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even''' lowercase : int =float(embedding_dim // 2 ) lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment ) lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 ) # scale embeddings lowercase : Tuple =scale * emb if flip_sin_to_cos: lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 ) else: lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 ) lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] ) return signal class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = jnp.floataa @nn.compact def __call__( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ ) lowercase : Any =nn.silu(UpperCAmelCase__ ) lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ ) return temb class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = False lowerCamelCase_ = 1 @nn.compact def __call__( self : int , UpperCAmelCase__ : str ): '''simple docstring''' return get_sinusoidal_embeddings( UpperCAmelCase__ , embedding_dim=self.dim , 
flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
92
0
from math import isclose, sqrt def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float, lowerCAmelCase_ : float ): __lowerCAmelCase = point_y / 4 / point_x __lowerCAmelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __lowerCAmelCase = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __lowerCAmelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __lowerCAmelCase = outgoing_gradient**2 + 4 __lowerCAmelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __lowerCAmelCase = (point_y - outgoing_gradient * point_x) ** 2 - 100 __lowerCAmelCase = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __lowerCAmelCase = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __lowerCAmelCase = x_minus if isclose(lowerCAmelCase_, lowerCAmelCase_ ) else x_plus __lowerCAmelCase = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a_ ( lowerCAmelCase_ : float = 1.4, lowerCAmelCase_ : float = -9.6 ): __lowerCAmelCase = 0 __lowerCAmelCase = first_x_coord __lowerCAmelCase = first_y_coord __lowerCAmelCase = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = next_point(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F"""{solution() = }""")
53
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO Update this UpperCamelCase_ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'esm' def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =vocab_size lowercase : List[Any] =hidden_size lowercase : Any =num_hidden_layers lowercase : Optional[Any] =num_attention_heads lowercase : Tuple =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Optional[int] =max_position_embeddings lowercase : Union[str, Any] =initializer_range lowercase : Tuple =layer_norm_eps lowercase : Union[str, Any] =position_embedding_type lowercase : List[Any] =use_cache lowercase : Dict =emb_layer_norm_before lowercase : Optional[Any] =token_dropout lowercase : Union[str, Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No 
esmfold_config supplied for folding model, using default values.''' ) lowercase : Any =EsmFoldConfig() elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ ) lowercase : Union[str, Any] =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowercase : int =get_default_vocab_list() else: lowercase : Tuple =vocab_list else: lowercase : Union[str, Any] =None lowercase : Dict =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase__ ): lowercase : Optional[Any] =self.esmfold_config.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = None lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 0 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.trunk is None: lowercase : str =TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase__ ): lowercase : int =TrunkConfig(**self.trunk ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =asdict(self ) lowercase : Union[str, Any] =self.trunk.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 48 lowerCamelCase_ = 10_24 lowerCamelCase_ = 1_28 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = False lowerCamelCase_ = 4 lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' if 
self.structure_module is None: lowercase : Any =StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase__ ): lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase : str =self.sequence_state_dim // self.sequence_head_width lowercase : int =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =asdict(self ) lowercase : Any =self.structure_module.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 3_84 lowerCamelCase_ = 1_28 lowerCamelCase_ = 16 lowerCamelCase_ = 
1_28 lowerCamelCase_ = 12 lowerCamelCase_ = 4 lowerCamelCase_ = 8 lowerCamelCase_ = 0.1 lowerCamelCase_ = 8 lowerCamelCase_ = 1 lowerCamelCase_ = 2 lowerCamelCase_ = 7 lowerCamelCase_ = 10 lowerCamelCase_ = 1E-8 lowerCamelCase_ = 1E5 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return asdict(self ) def _lowerCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
92
0
"""A platform-independent file lock (vendored py-filelock 3.0.12 style).

Fixes over the mangled original: duplicate parameter names (SyntaxErrors),
properties/classes referencing undefined mangled names, ``__del__`` passing an
undefined name as ``force``, and the bare ``except:`` in ``acquire``.
"""
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Return the module logger, created lazily on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""

    def __init__(self, lock_file):
        #: Path of the file lock that could not be acquired.
        self.lock_file = lock_file

    def __str__(self):
        return f"The file lock '{self.lock_file}' could not be acquired."


class _Acquire_ReturnProxy:
    """Proxy returned by ``BaseFileLock.acquire`` so it can be used either as a
    plain call or as a context manager that releases on exit."""

    def __init__(self, lock):
        self.lock = lock

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()


class BaseFileLock:
    """Abstract base implementing the (re-entrant) locking protocol.

    Subclasses provide the platform-specific ``_acquire`` / ``_release``.
    """

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long for the filesystem.
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as returned by os.open().
        # It is only NOT None while the object holds the lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # Guards the lock counter below.
        self._thread_lock = threading.Lock()
        # Nested-locking counter: incremented on each acquire; the OS lock is
        # only released when it drops back to 0.
        self._lock_counter = 0

    @property
    def lock_file(self):
        """Path of the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """Default timeout in seconds (negative means wait forever)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)

    def _acquire(self):
        """Platform hook: try once to take the OS-level lock (non-blocking)."""
        raise NotImplementedError()

    def _release(self):
        """Platform hook: release the OS-level lock."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True while this object holds the OS-level lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquire the lock, polling every *poll_intervall* seconds.

        Raises ``Timeout`` if *timeout* (>= 0) seconds elapse first.
        """
        if timeout is None:
            timeout = self.timeout

        # Increment the counter right at the beginning; undone on failure.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except BaseException:
            # Something went wrong (including Timeout), so undo the increment.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Decrement the lock counter; release the OS lock at 0 or if *force*."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def __del__(self):
        # Best-effort cleanup: force-release on garbage collection.
        self.release(force=True)

    def hash_filename_if_too_long(self, path, max_length):
        """Shorten *path*'s basename (keeping a hash suffix) if it exceeds
        *max_length*; otherwise return *path* unchanged."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """File lock based on ``msvcrt.locking`` (Windows only)."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # Extended-length path prefix to bypass MAX_PATH.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass


class UnixFileLock(BaseFileLock):
    """File lock based on ``fcntl.flock`` (POSIX only)."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # Cap the filename length at what the filesystem actually supports.
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)


class SoftFileLock(BaseFileLock):
    """Lock by exclusively creating the lock file (portable, not crash-safe)."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass


# Pick the best available implementation for this platform.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
54
"""Tests directory-specific pytest settings — run automatically by pytest.

Restores real pytest hook names (the mangled source named every hook
``_lowerCAmelCase``, so pytest never discovered them and the duplicate
parameter names were SyntaxErrors) and the undefined ``git_repo_path`` name.
"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used across the test suite."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Forward the shared CLI options (e.g. --make-reports) to pytest."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Write the shared report files when --make-reports is given."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """OutputChecker that treats examples flagged with IGNORE_RESULT as passing."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Install the customised doctest machinery.
# NOTE(review): monkey-patch targets restored — the mangled source assigned
# these to throwaway names, leaving CustomOutputChecker undefined and unused.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
92
0
"""Tests for GLPNImageProcessor.

Restores the names the file itself references: ``GLPNImageProcessingTester``
(called but previously named ``UpperCAmelCase``, shadowed by the second class)
and ``prepare_image_processor_dict`` (called but previously named
``UpperCamelCase_``); gives each test method a distinct ``test_*`` name so
unittest discovers all of them instead of only the last definition.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the fixture hyper-parameters used to build GLPN image processors."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        # kwargs used to instantiate GLPNImageProcessor in the tests below
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
55
"""Image processor that rescales pixel values and symmetric-pads images so both
spatial dimensions become a multiple of ``pad_size``."""
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE(review): the class name is mangled in the source; kept as-is to preserve
# the module's public name (likely Swin2SRImageProcessor upstream — verify).
class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    # presumably consumed by the processing framework as the model input key
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        """Store the default preprocessing flags.

        Args:
            do_rescale: whether to multiply pixel values by ``rescale_factor``.
            rescale_factor: scale applied when rescaling (default 1/255).
            do_pad: whether to pad to a multiple of ``pad_size``.
            pad_size: the divisor both output dimensions must satisfy.
        """
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (delegates to image_transforms.rescale)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetric-pad the bottom/right edges so height and width are multiples of ``size``.

        NOTE: when a dimension is already a multiple of ``size``, this formula
        still adds one full extra block of padding — kept as in the original.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply rescale + pad to one image (or a batch) and wrap in a BatchFeature.

        Per-call arguments override the instance defaults set in ``__init__``.
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
92
0
"""Binary tree traversals (recursive and iterative), plus an interactive builder.

Fixes over the mangled original: every function was named ``_a`` while the
``__main__`` block called ``build_tree``/``pre_order``/``prompt``/... (NameError),
and the guard clauses tested ``isinstance(node, node)`` instead of
``isinstance(node, TreeNode)``.
"""
from __future__ import annotations

import queue


class TreeNode:
    """A binary tree node holding an integer ``data`` and child links."""

    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    """Interactively build a tree breadth-first from stdin; 'n' stops input."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable in practice; keeps the declared return type honest


def pre_order(node: TreeNode) -> None:
    """Print root, left, right — comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Print left, root, right — comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Print left, right, root — comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    """Print nodes breadth-first on a single line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Print nodes breadth-first, one tree level per output line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of inner while means current node has no left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stacka_out = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stacka_out.append(n)
    while stacka_out:  # pop up from stack2 will be the post order
        print(stacka_out.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    """Return *s* centered in a *width*-wide banner of *char* (or a bare rule)."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
56
"""Lazy import structure for the MBart model family.

The mangled original assigned every sub-list to the same throwaway name
(repeatedly overwriting the dict) while ``_LazyModule`` referenced an
undefined ``_import_structure``; this restores the submodule-keyed dict,
mirroring the imports in the TYPE_CHECKING branch below.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names exported from it; consumed by _LazyModule.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy loader so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
"""Tests for the visual-question-answering pipeline.

Restores the names the file itself relies on: the vision fallback stub must be
named ``Image`` (it is used as ``Image.open`` below, but was mangled to
``_lowerCAmelCase``), and each test method gets a distinct ``test_*`` name so
unittest runs all of them instead of only the last definition. ``ANY(float)`` /
``ANY(str)`` replace references to a non-existent mangled name.
"""
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub so the module still imports when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
57
"""Lazy import structure for the ByT5 tokenizer.

Fixes over the mangled original: ``_import_structure`` was referenced by
``_LazyModule`` but never defined; the TYPE_CHECKING import pointed at a
digit-mangled module name (``tokenization_byta``) inconsistent with the
``tokenization_byt5`` key; and the lazy module was discarded instead of being
installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Maps submodule name -> public names exported from it; consumed by _LazyModule.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with the lazy loader so the tokenizer imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger(__name__) def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ): '''simple docstring''' snake_case_ : List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase ) snake_case_ : int = downstream_dict["""projector.weight"""] snake_case_ : Optional[int] = downstream_dict["""projector.bias"""] snake_case_ : List[Any] = downstream_dict["""model.post_net.linear.weight"""] snake_case_ : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""] return model def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : str ): '''simple docstring''' snake_case_ : int = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase ) snake_case_ : Any = downstream_dict["""model.linear.weight"""] snake_case_ : int = downstream_dict["""model.linear.bias"""] return model def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ): '''simple docstring''' snake_case_ : Optional[int] = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase ) snake_case_ : Any = downstream_dict["""connector.weight"""] snake_case_ : str = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): snake_case_ : Dict = downstream_dict[ F'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] snake_case_ : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias'] snake_case_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] snake_case_ : int = 
downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] snake_case_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] snake_case_ : List[str] = downstream_dict["""objective.W"""] return model @torch.no_grad() def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ): '''simple docstring''' snake_case_ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" ) snake_case_ : Any = checkpoint["""Downstream"""] snake_case_ : Optional[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase ) snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained( __UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase ) snake_case_ : Optional[Any] = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): snake_case_ : Tuple = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) elif arch.endswith("""ForAudioFrameClassification""" ): snake_case_ : Union[str, Any] = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) elif arch.endswith("""ForXVector""" ): snake_case_ : List[str] = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: snake_case_ : List[Any] = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(__UpperCamelCase ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to 
the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') __lowerCAmelCase : Dict = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
58
"""Fine-tune a multiple-choice model (e.g. on SWAG) with the Trainer API.

Restored from a mangled original in which ``main``/``simple_accuracy``/``_mp_fn``
and both argument dataclasses had lost their names, breaking every call site
(``HfArgumentParser((ModelArguments, ...))``, ``simple_accuracy(...)``, ``main()``).
"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """Fraction of predictions that match the labels (numpy arrays)."""
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data the model is trained/evaluated on."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    """Parse CLI args, train and/or evaluate, and return the eval metrics dict."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer.
    # In distributed training, from_pretrained guarantees only one local
    # process concurrently downloads model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: pad to multiples of 8 for fp16 tensor-core efficiency.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
92
0
from math import ceil


def assert_device_map(device_map, num_blocks):
    """Validate that ``device_map`` assigns each of ``num_blocks`` attention
    blocks to exactly one device.

    Args:
        device_map: dict mapping device id -> list of block indices.
        num_blocks: total number of attention blocks the model has.

    Raises:
        ValueError: on duplicate, missing, or out-of-range block indices.
    """
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Return an even partition of ``n_layers`` layer indices over ``devices``.

    Layers are split into contiguous chunks of ``ceil(n_layers / len(devices))``.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
59
"""Build a RAG knowledge dataset (title/text/embeddings) and its FAISS HNSW index
from a tab-separated csv of documents.

Restored from a mangled original in which ``split_text``/``split_documents``/
``embed``/``main`` and all three argument dataclasses had lost their names
(e.g. ``dataset.map(__magic_name__, ...)`` and three classes all called
``__SCREAMING_SNAKE_CASE`` with every field named ``lowerCamelCase_``).
"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of ``n`` words (split on ``character``)."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split a batch of documents into 100-word passages, repeating the title per passage."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute DPR embeddings for a batch of passages (title + text)."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
92
0
from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup lowerCAmelCase_ = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l=''' def lowerCamelCase_ ( _UpperCamelCase = "mumbai" ) -> Generator[tuple[str, str], None, None]: """simple docstring""" snake_case_ : Tuple = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' ) # This attribute finds out all the specifics listed in a job for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ): snake_case_ : Tuple = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip() snake_case_ : int = job.find('''span''' , {'''class''': '''company'''} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs('''Bangalore'''), 1): print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
60
'''simple docstring'''
# Tokenizer tests for the (mangled) M2M100 tokenizer, apparently derived from
# transformers' test_tokenization_m2m_100.py.
# NOTE(review): identifier obfuscation has broken this file — every method of
# both classes is named ``lowerCamelCase_`` (so later defs shadow earlier ones),
# every class attribute is named ``lowerCamelCase_``, the mixin base
# ``lowercase__`` is undefined, and ``EN_CODE``/``FR_CODE`` are referenced but
# only two anonymous ``UpperCamelCase_`` constants (128022 / 128028) are
# defined — presumably those are EN_CODE and FR_CODE; verify against upstream.
# Code is left byte-identical; only comments were added.
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available

if is_sentencepiece_available():
    from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin

if is_sentencepiece_available():
    # Path to the shared dummy SentencePiece model used by the tokenizer tests.
    UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

# Language-code token ids (presumably EN_CODE = 128022, FR_CODE = 128028 —
# both are bound to the same mangled name here; TODO confirm upstream).
UpperCamelCase_ = 128022
UpperCamelCase_ = 128028


@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
    # NOTE(review): ``lowercase__`` (the tokenizer-tester mixin base) is not
    # defined anywhere in this file — TODO confirm it should be
    # TokenizerTesterMixin, which IS imported above.
    # The four attributes below all share one mangled name, so only the last
    # assignment survives.
    lowerCamelCase_ = MaMaaaTokenizer
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = True

    def lowerCamelCase_ ( self : Dict ):
        '''simple docstring'''
        # Build a tiny vocab + spm fixture and save a tokenizer into tmpdirname.
        super().setUp()
        lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        lowercase : List[Any] =Path(self.tmpdirname )
        save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ):
        '''simple docstring'''
        # Tokenizer factory used by the mixin's generic tests.
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ):
        '''simple docstring'''
        # (input, expected output) pair for the mixin's round-trip test.
        return (
            "This is a test",
            "This is a test",
        )

    def lowerCamelCase_ ( self : Any ):
        '''simple docstring'''
        # </s> is expected to map to id 0 and back.
        lowercase : Tuple ='''</s>'''
        lowercase : Union[str, Any] =0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        # Checks vocab ordering and total size (base vocab + added tokens).
        lowercase : List[Any] =self.get_tokenizer()
        lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<s>''' )
        self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )

    @unittest.skip('''Skip this test while all models are still to be uploaded.''' )
    def lowerCamelCase_ ( self : Optional[int] ):
        '''simple docstring'''
        pass

    def lowerCamelCase_ ( self : List[Any] ):
        '''simple docstring'''
        # tokenize / ids / tokens / string round trip on "This is a test".
        lowercase : Union[str, Any] =self.get_tokenizer()
        lowercase : str =tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , )
        lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , '''This is a test''' )

    @slow
    def lowerCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        # Golden encoding against the real facebook/m2m100_418M checkpoint.
        # fmt: off
        lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )


@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    # Integration tests against the real checkpoint; attribute names below are
    # also all mangled to one name (originally checkpoint_name / src_text /
    # tgt_text / expected_src_tokens — TODO confirm upstream).
    lowerCamelCase_ = 'facebook/m2m100_418M'
    lowerCamelCase_ = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    lowerCamelCase_ = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
    ]
    # fmt: off
    lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    # fmt: on

    @classmethod
    def lowerCamelCase_ ( cls : Optional[Any] ):
        '''simple docstring'''
        # One tokenizer shared by all tests (en -> fr by default).
        lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
        lowercase : Optional[int] =1
        return cls

    def lowerCamelCase_ ( self : List[Any] ):
        '''simple docstring'''
        # Known language-code ids in the 418M checkpoint.
        self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
        self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
        self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
        self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )

    def lowerCamelCase_ ( self : int ):
        '''simple docstring'''
        lowercase : List[str] =self.tokenizer.get_vocab()
        self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['''<unk>'''] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        # Encoding of the first source sentence matches the expected token ids.
        lowercase : List[Any] ='''en'''
        lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : int ):
        '''simple docstring'''
        # Decoding drops the leading language-code token and special tokens.
        self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids )
        # fmt: off
        lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
        lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ )

    def lowerCamelCase_ ( self : int ):
        '''simple docstring'''
        # Save/reload round trip preserves the language-token mapping.
        lowercase : Any =tempfile.mkdtemp()
        lowercase : Tuple =self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(UpperCAmelCase__ )
        lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ )
        self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ )

    @require_torch
    def lowerCamelCase_ ( self : List[str] ):
        '''simple docstring'''
        # Seq2seq batch: check language-code placement in inputs/labels and
        # the shifted decoder inputs.
        lowercase : List[str] ='''en'''
        lowercase : int ='''fr'''
        lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' )
        lowercase : str =shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            lowercase : int =batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def lowerCamelCase_ ( self : int ):
        '''simple docstring'''
        # Changing tgt_lang does not affect source-side prefix/suffix tokens.
        lowercase : Optional[int] ='''mr'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        lowercase : Union[str, Any] ='''zh'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    @require_torch
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        # Target-mode switches the prefix to the target language code and back.
        lowercase : int ='''mr'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        lowercase : Optional[Any] ='''zh'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )

    @require_torch
    def lowerCamelCase_ ( self : Any ):
        '''simple docstring'''
        # Translation inputs carry the forced BOS id of the target language.
        lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , {
                # en_XX, A, test, EOS
                '''input_ids''': [[128022, 58, 4183, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 128006,
            } , )
92
0
"""Module init for the MVP model, using Transformers' lazy-import pattern.

Fixed: the mangled original bound the import structure to a throwaway name and
then *reassigned* that name in each optional-dependency branch (discarding the
base entries), and `_LazyModule` was never installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> list of public names it exports.
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

# Optional fast tokenizer: only advertised when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

# Optional PyTorch modeling code: only advertised when `torch` is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers / IDEs.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
61
"""Project Euler problem 3: find the largest prime factor of a number.

The mangled original defined the function under an obfuscated name while
``__main__`` called the undefined ``solution()``; the name is restored here.
"""


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` via trial division.

    Args:
        n: number to factor (anything castable to int; defaults to the
           Project Euler 3 input).

    Returns:
        The largest prime factor of ``n`` (0 when ``n`` == 1, matching the
        original behavior of leaving the accumulator untouched).

    Raises:
        TypeError: if ``n`` is not int or castable to int.
        ValueError: if ``n`` < 1.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the next divisor of the (already reduced) n;
        # any divisor found this way is necessarily prime.
        while n % i != 0:
            i += 1
        ans = i
        # Strip out every power of this prime factor.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
92
0
from ..utils import DummyObject, requires_backends


class SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    """Placeholder object that raises an informative ImportError whenever it is
    instantiated or loaded, because the real implementation needs the `torch`
    and `torchsde` backends which are not installed.
    """

    # Read by the DummyObject metaclass to report the missing backends.
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    # NOTE(review): the obfuscated source had two classmethods both named `_A`
    # (shadowing each other); restored to the standard dummy-object hooks.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
62
"""Speech2Text2 decoder model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for a Speech2Text2-style decoder.

    Stores the hyper-parameters of the decoder stack; all remaining keyword
    arguments are forwarded to :class:`PretrainedConfig` (special token ids,
    etc.).

    NOTE(review): the obfuscated source used the undefined name ``lowercase__``
    as the base class and duplicate parameter names (a SyntaxError); the
    standard ``PretrainedConfig`` signature has been restored from the
    per-parameter default values, which survived obfuscation intact.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
92
0
"""Lazy import bootstrap for the RemBERT model package.

Registers submodules and their public names with ``_LazyModule`` so that the
optional backends (sentencepiece, tokenizers, torch, tensorflow) are only
imported when actually used.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule name -> list of public names re-exported from it.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
63
"""Tests for the PyTorch ConvNextV2 model.

NOTE(review): reconstructed from machine-obfuscated source in which all
parameter names were duplicated (a SyntaxError), attribute assignments lost
their ``self.`` targets, and class bases were replaced by the undefined name
``lowercase__``. The tester-class name is anchored by the surviving reference
``ConvNextVaModelTester(self)`` in ``setUp``; the imported mixins
``ModelTesterMixin``/``PipelineTesterMixin`` supply the restored bases.
"""
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextVaModelTester:
    """Builds tiny configs/inputs and shared checks for the ConvNextV2 tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # Mutable defaults kept from the original helper; they are read-only here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one small batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester suite for ConvNextV2."""

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    # ConvNextV2 is a pure conv net: no pruning, embeddings or attentions.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base/backbone models have no loss head to train.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
92
0
"""Convert a pretrained UnCLIP text-to-image pipeline into an image-variation
pipeline by reusing its decoder/super-resolution components and attaching a
CLIP image encoder, then save the result to disk.
"""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # argparse derives the attribute name from the `--txt2img_unclip` flag.
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )

    imgaimg.save_pretrained(args.dump_path)
64
"""Utilities for building pjit partition specs for GPT-2-style Flax params."""
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in ``qs`` match any window of strings in ``ks``."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Build a (key, value) -> value function applying the first matching rule."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Ordered (key-pattern, PartitionSpec) rules for model-parallel sharding."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Return a frozen dict mapping every flattened param key to its spec.

    Raises:
        AssertionError: if any parameter key matched no partition rule.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}

    assert _unmatched not in result.values(), "Incomplete partition spec."

    return freeze(unflatten_dict(result))
92
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['MobileViTFeatureExtractor'] __UpperCAmelCase = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
65
"""Even Tree problem: count the maximum number of edges that can be removed
from a tree (rooted at node 1) so that every resulting component has an even
number of nodes. The answer printed is ``len(cuts) - 1`` because the root's
own component is counted too.
"""
from collections import defaultdict

# Shared state for the recursive DFS (module-level so the functions are
# importable and testable; the __main__ block below populates `tree`).
tree = defaultdict(list)  # adjacency list: node -> neighbours
visited = {}  # node -> True once seen
cuts = []  # roots of even-sized subtrees (candidate cut points)


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``.

    Side effects: marks nodes in ``visited`` and appends ``start`` to
    ``cuts`` whenever its subtree size is even.
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # An even-sized subtree can be split off by cutting the edge above it.
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the DFS from the root (node 1), filling ``cuts``."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9  # number of nodes / edges (informational)
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
92
0
"""Lazy import bootstrap for the GPT-Neo model package.

Registers submodules and their public names with ``_LazyModule`` so the
optional backends (torch, flax) load only on demand.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Submodule name -> list of public names re-exported from it.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy; submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
"""Helpers for converting a PyTorch state dict into Flax parameter pytrees."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite ``module.3.weight``-style keys to ``module_3.weight`` form."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape
    the tensor if necessary (conv kernels, linear kernels)."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PT kernels are transposed relative to Flax
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict to an (unflattened) Flax param dict.

    Raises:
        ValueError: when a converted tensor's shape disagrees with the shape
            of the randomly initialized Flax parameter of the same name.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.'''
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
92
0
import copy import random from transformers import CLIPTokenizer class A_ ( UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[Any] ,*__A : Any ,**__A : Tuple ) -> Dict: super().__init__(*__A ,**__A ) _lowercase = {} def __UpperCAmelCase ( self : int ,__A : int ,*__A : List[Any] ,**__A : Any ) -> Union[str, Any]: _lowercase = super().add_tokens(__A ,*__A ,**__A ) if num_added_tokens == 0: raise ValueError( F"""The tokenizer already contains the token {placeholder_token}. Please pass a different""" ' `placeholder_token` that is not already in the tokenizer.' ) def __UpperCAmelCase ( self : Union[str, Any] ,__A : Union[str, Any] ,*__A : Dict ,__A : List[Any]=1 ,**__A : str ) -> Optional[int]: _lowercase = [] if num_vec_per_token == 1: self.try_adding_tokens(__A ,*__A ,**__A ) output.append(__A ) else: _lowercase = [] for i in range(__A ): _lowercase = placeholder_token + F"""_{i}""" self.try_adding_tokens(__A ,*__A ,**__A ) output.append(__A ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F"""The tokenizer already has placeholder token {token} that can get confused with""" F""" {placeholder_token}keep placeholder tokens independent""" ) _lowercase = output def __UpperCAmelCase ( self : str ,__A : int ,__A : Union[str, Any]=False ,__A : Union[str, Any]=1.0 ) -> Union[str, Any]: if isinstance(__A ,__A ): _lowercase = [] for i in range(len(__A ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=__A ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: _lowercase = self.token_map[placeholder_token] _lowercase = tokens[: 1 + int(len(__A ) * prop_tokens_to_load )] if vector_shuffle: _lowercase = copy.copy(__A ) random.shuffle(__A ) _lowercase = text.replace(__A ,' '.join(__A ) ) return text def __call__( self : Union[str, Any] ,__A : Optional[int] 
,*__A : Optional[Any] ,__A : Optional[Any]=False ,__A : int=1.0 ,**__A : Dict ) -> Tuple: return super().__call__( self.replace_placeholder_tokens_in_text( __A ,vector_shuffle=__A ,prop_tokens_to_load=__A ) ,*__A ,**__A ,) def __UpperCAmelCase ( self : int ,__A : Dict ,*__A : Optional[int] ,__A : int=False ,__A : str=1.0 ,**__A : Optional[Any] ) -> List[str]: return super().encode( self.replace_placeholder_tokens_in_text( __A ,vector_shuffle=__A ,prop_tokens_to_load=__A ) ,*__A ,**__A ,)
67
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. 
lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. 
if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
from __future__ import annotations import math from collections.abc import Callable def lowercase__ ( A_: Callable[[int | float], int | float] , A_: int | float , A_: int | float , A_: int = 100 , ) -> float: """simple docstring""" __UpperCAmelCase =x_start __UpperCAmelCase =fnc(A_ ) __UpperCAmelCase =0.0 for _ in range(A_ ): # Approximates curve as a sequence of linear lines and sums their length __UpperCAmelCase =(x_end - x_start) / steps + xa __UpperCAmelCase =fnc(A_ ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step __UpperCAmelCase =xa __UpperCAmelCase =fxa return length if __name__ == "__main__": def lowercase__ ( A_: Any ) -> Any: """simple docstring""" return math.sin(10 * x ) print("f(x) = sin(10 * x)") print("The length of the curve from x = -10 to x = 10 is:") __A = 10 while i <= 10_00_00: print(F"""With {i} steps: {line_length(f, -10, 10, i)}""") i *= 10
68
'''simple docstring''' import datasets UpperCamelCase_ = """\ @InProceedings{conneau2018xnli, author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\", title = \"XNLI: Evaluating Cross-lingual Sentence Representations\", booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\", year = \"2018\", publisher = \"Association for Computational Linguistics\", location = \"Brussels, Belgium\", } """ UpperCamelCase_ = """\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ UpperCamelCase_ = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. 
Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric(\"xnli\") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def lowerCamelCase_ ( self : str ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
92
0
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) __snake_case = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b" __snake_case = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b" __snake_case = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
69
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , 
n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() 
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : 
int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] 
=model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
from PIL import Image def _SCREAMING_SNAKE_CASE ( lowercase : Image ): '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = image.size lowerCamelCase_ = 0 lowerCamelCase_ = image.load() for i in range(lowercase ): for j in range(lowercase ): lowerCamelCase_ = pixels[j, i] mean += pixel mean //= width * height for j in range(lowercase ): for i in range(lowercase ): lowerCamelCase_ = 2_55 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": lowerCamelCase : Optional[Any] = mean_threshold(Image.open("path_to_image").convert("L")) image.save("output_image_path")
70
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ): '''simple docstring''' lowercase : str =parent lowercase : int =batch_size lowercase : Any =seq_length lowercase : int =is_training lowercase : str =use_input_mask lowercase : int =use_token_type_ids lowercase : Dict =use_labels lowercase : int =vocab_size lowercase : str =embedding_size lowercase : Union[str, Any] =hidden_size lowercase : Tuple 
=num_hidden_layers lowercase : Any =num_hidden_groups lowercase : Union[str, Any] =num_attention_heads lowercase : Any =intermediate_size lowercase : Tuple =hidden_act lowercase : Optional[int] =hidden_dropout_prob lowercase : Union[str, Any] =attention_probs_dropout_prob lowercase : List[Any] =max_position_embeddings lowercase : int =type_vocab_size lowercase : int =type_sequence_label_size lowercase : Any =initializer_range lowercase : List[Any] =num_labels lowercase : int =num_choices lowercase : Optional[int] =scope def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[int] =None if self.use_input_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Dict =None if self.use_token_type_ids: lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Tuple =None lowercase : Any =None lowercase : Dict =None if self.use_labels: lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Any =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , 
num_hidden_groups=self.num_hidden_groups , ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =AlbertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : int =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , 
attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] =self.num_labels lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : List[Any] =self.num_labels lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() 
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[int] =self.num_choices lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Union[str, Any] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Dict =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': AlbertModel, 'fill-mask': 
AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ = True def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ): '''simple docstring''' lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): lowercase : Any =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) lowercase : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =AlbertModelTester(self ) lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def 
lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Tuple =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' ) lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] lowercase : int =torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : Union[str, Any] =torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
'''simple docstring''' import re def a__ ( _SCREAMING_SNAKE_CASE : str ) -> bool: """simple docstring""" UpperCAmelCase_ : Optional[Any] = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" ) if match := re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
71
'''simple docstring''' import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ): '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError('''Destination width/height should be > 0''' ) lowercase : Union[str, Any] =img lowercase : Union[str, Any] =img.shape[1] lowercase : str =img.shape[0] lowercase : Union[str, Any] =dst_width lowercase : str =dst_height lowercase : str =self.src_w / self.dst_w lowercase : Optional[Any] =self.src_h / self.dst_h lowercase : int =( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): lowercase : List[Any] =self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )] def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_x * x ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": UpperCamelCase_ , UpperCamelCase_ = 800, 600 UpperCamelCase_ = imread("""image_data/lena.jpg""", 1) UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
92
0
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : int = logging.get_logger(__name__) _UpperCAmelCase : Optional[Any] = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class __magic_name__ ( __SCREAMING_SNAKE_CASE ): UpperCamelCase__ = 'encodec' def __init__( self , snake_case_=[1.5, 3.0, 6.0, 12.0, 24.0] , snake_case_=2_40_00 , snake_case_=1 , snake_case_=False , snake_case_=None , snake_case_=None , snake_case_=1_28 , snake_case_=32 , snake_case_=1 , snake_case_=[8, 5, 4, 2] , snake_case_="weight_norm" , snake_case_=7 , snake_case_=7 , snake_case_=3 , snake_case_=2 , snake_case_=True , snake_case_="reflect" , snake_case_=2 , snake_case_=2 , snake_case_=1.0 , snake_case_=10_24 , snake_case_=None , snake_case_=True , **snake_case_ , ): lowercase =target_bandwidths lowercase =sampling_rate lowercase =audio_channels lowercase =normalize lowercase =chunk_length_s lowercase =overlap lowercase =hidden_size lowercase =num_filters lowercase =num_residual_layers lowercase =upsampling_ratios lowercase =norm_type lowercase =kernel_size lowercase =last_kernel_size lowercase =residual_kernel_size lowercase =dilation_growth_rate lowercase =use_causal_conv lowercase =pad_mode lowercase =compress lowercase =num_lstm_layers lowercase =trim_right_ratio lowercase =codebook_size lowercase =codebook_dim if codebook_dim is not None else hidden_size lowercase =use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) super().__init__(**snake_case_ ) @property def _A( self ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * 
self.sampling_rate ) @property def _A( self ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def _A( self ): lowercase =np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def _A( self ): return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
72
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Any =0.0_0 lowercase : Tuple =0 for resistor in resistors: if resistor <= 0: lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!''' raise ValueError(__magic_name__ ) first_sum += 1 / float(__magic_name__ ) index += 1 return 1 / first_sum def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Optional[Any] =0.0_0 lowercase : int =0 for resistor in resistors: sum_r += resistor if resistor < 0: lowercase : Tuple =f'''Resistor at index {index} has a negative value!''' raise ValueError(__magic_name__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
92
0
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness a_ : Optional[Any] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' a_ : List[str] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' a_ : Any = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. 
Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' a_ : int = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". 
Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' a_ : List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string')), 'references': datasets.Value('string'), }) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def SCREAMING_SNAKE_CASE__ ( self , a , a , a=[1, 10, 100] , a=4 , a=3.0) -> str: if os.getenv('HF_ALLOW_CODE_EVAL' , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.') with ThreadPoolExecutor(max_workers=a) as executor: SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = Counter() SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = defaultdict(a) for task_id, (candidates, test_case) in enumerate(zip(a , a)): for candidate in candidates: SCREAMING_SNAKE_CASE = candidate + '\n' + test_case SCREAMING_SNAKE_CASE = (test_program, timeout, task_id, completion_id[task_id]) SCREAMING_SNAKE_CASE = executor.submit(a , *a) futures.append(a) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(a): SCREAMING_SNAKE_CASE = future.result() results[result["task_id"]].append((result['completion_id'], result)) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], [] for result in results.values(): result.sort() SCREAMING_SNAKE_CASE = [r[1]['passed'] for r in result] total.append(len(a)) correct.append(sum(a)) SCREAMING_SNAKE_CASE = np.array(a) SCREAMING_SNAKE_CASE = np.array(a) SCREAMING_SNAKE_CASE = k SCREAMING_SNAKE_CASE = {f'''pass@{k}''': estimate_pass_at_k(a , a , a).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): def estimator(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1)) if 
isinstance(_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = itertools.repeat(_UpperCAmelCase , len(_UpperCAmelCase)) else: assert len(_UpperCAmelCase) == len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = iter(_UpperCAmelCase) return np.array([estimator(int(_UpperCAmelCase) , int(_UpperCAmelCase) , _UpperCAmelCase) for n, c in zip(_UpperCAmelCase , _UpperCAmelCase)])
73
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": 
"""encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def 
_lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized 
from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple 
=WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
92
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    """Configuration class for an MGP-STR scene-text-recognition model.

    Stores the hyper-parameters used to instantiate the model; all arguments
    are optional and default to the `alibaba-damo/mgp-str-base` architecture.

    Bug fixed: the previous version assigned every constructor argument to a
    single local variable instead of instance attributes, so the config object
    retained none of its settings.
    """

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],  # (height, width); list default kept for interface compatibility
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Vision-transformer backbone geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        # Sizes of the three output heads (character / BPE / wordpiece).
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        # Transformer encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
74
"""Sinusoidal time-step embeddings for Flax diffusion models."""

import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal position embeddings for a 1-D array of timesteps.

    Bug fixed: the previous version collapsed all intermediates into one local
    name while later expressions referenced the original (undefined) names,
    raising NameError at runtime.

    Args:
        timesteps: 1-D array of timestep values, shape ``(batch,)``.
        embedding_dim: output embedding width; must be even.
        freq_shift: offset subtracted from the timescale count when spacing
            the log-frequencies.
        min_timescale / max_timescale: geometric frequency range endpoints.
        flip_sin_to_cos: if True emit ``[cos | sin]`` halves instead of
            ``[sin | cos]``.
        scale: multiplier applied to the phase before sin/cos.

    Returns:
        Array of shape ``(batch, embedding_dim)``.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Two-layer MLP (Dense -> SiLU -> Dense) projecting timestep embeddings."""

    # Output width of both dense layers.
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps :func:`get_sinusoidal_embeddings` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
92
0
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def a__ ( lowerCAmelCase__ ) -> Any: if isinstance(lowerCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class lowerCamelCase_ : def lowercase_ ( self : str , _A : Union[str, Any] , _A : List[Any] ): '''simple docstring''' pass def lowercase_ ( self : Optional[int] ): '''simple docstring''' pass def lowercase_ ( self : Any ): '''simple docstring''' pass def lowercase_ ( self : List[Any] , _A : np.ndarray , _A : np.ndarray , _A : float ): '''simple docstring''' UpperCAmelCase__ : int = np.abs((a - b) ).max() self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : Any , _A : Any=None , **_A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) UpperCAmelCase__ : 
Optional[int] = FlaxVisionTextDualEncoderModel(_A ) UpperCAmelCase__ : Union[str, Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def lowercase_ ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : str , _A : str , _A : int=None , **_A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.get_vision_text_model(_A , _A ) UpperCAmelCase__ : int = {'''vision_model''': vision_model, '''text_model''': text_model} UpperCAmelCase__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) UpperCAmelCase__ : Any = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowercase_ ( self : Dict , _A : Optional[int] , _A : Union[str, Any] , _A : List[str] , _A : str , _A : Optional[int]=None , **_A : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.get_vision_text_model(_A , _A ) UpperCAmelCase__ : Optional[Any] = {'''vision_model''': vision_model, '''text_model''': text_model} UpperCAmelCase__ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) UpperCAmelCase__ : Any = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) UpperCAmelCase__ : Dict = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) UpperCAmelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_A ) UpperCAmelCase__ : Optional[Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A ) UpperCAmelCase__ : Any = after_output[0] UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) ) 
self.assertLessEqual(_A , 1e-3 ) def lowercase_ ( self : Tuple , _A : Dict , _A : Optional[Any] , _A : List[Any] , _A : List[Any] , _A : Dict=None , **_A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_vision_text_model(_A , _A ) UpperCAmelCase__ : List[str] = {'''vision_model''': vision_model, '''text_model''': text_model} UpperCAmelCase__ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A ) UpperCAmelCase__ : Optional[Any] = model( input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A ) UpperCAmelCase__ : List[Any] = output.vision_model_output.attentions self.assertEqual(len(_A ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : Dict = to_atuple(vision_model.config.image_size ) UpperCAmelCase__ : List[Any] = to_atuple(vision_model.config.patch_size ) UpperCAmelCase__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase__ : Tuple = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase__ : List[Any] = output.text_model_output.attentions self.assertEqual(len(_A ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowercase_ ( self : List[str] , _A : Dict , _A : Any , _A : Tuple ): '''simple docstring''' pt_model.to(_A ) pt_model.eval() # prepare inputs UpperCAmelCase__ : Any = inputs_dict UpperCAmelCase__ : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): UpperCAmelCase__ : int = pt_model(**_A ).to_tuple() UpperCAmelCase__ : List[Any] = fx_model(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , 
pt_outputs[:4] ): self.assert_almost_equals(_A , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_A ) UpperCAmelCase__ : int = FlaxVisionTextDualEncoderModel.from_pretrained(_A , from_pt=_A ) UpperCAmelCase__ : Tuple = fx_model_loaded(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(_A , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_A ) UpperCAmelCase__ : Optional[int] = VisionTextDualEncoderModel.from_pretrained(_A , from_flax=_A ) pt_model_loaded.to(_A ) pt_model_loaded.eval() with torch.no_grad(): UpperCAmelCase__ : Any = pt_model_loaded(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(_A , pt_output_loaded.numpy() , 4e-2 ) def lowercase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : List[str] , _A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) UpperCAmelCase__ : Optional[int] = VisionTextDualEncoderModel(_A ) UpperCAmelCase__ : Tuple = FlaxVisionTextDualEncoderModel(_A ) UpperCAmelCase__ : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A ) UpperCAmelCase__ : Union[str, Any] = fx_state self.check_pt_flax_equivalence(_A , _A , _A ) def lowercase_ ( self : List[Any] , _A : Optional[Any] , _A : int , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A ) UpperCAmelCase__ : Optional[Any] = VisionTextDualEncoderModel(_A ) UpperCAmelCase__ : int = FlaxVisionTextDualEncoderModel(_A ) UpperCAmelCase__ : 
Optional[Any] = load_flax_weights_in_pytorch_model(_A , fx_model.params ) self.check_pt_flax_equivalence(_A , _A , _A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_A ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.prepare_config_and_inputs() self.check_save_load(**_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_A ) @is_pt_flax_cross_test def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs() UpperCAmelCase__ : Optional[int] = config_inputs_dict.pop('''vision_config''' ) UpperCAmelCase__ : List[str] = config_inputs_dict.pop('''text_config''' ) UpperCAmelCase__ : List[str] = config_inputs_dict self.check_equivalence_pt_to_flax(_A , _A , _A ) self.check_equivalence_flax_to_pt(_A , _A , _A ) @slow def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs() UpperCAmelCase__ : Union[str, Any] = model_a(**_A ) UpperCAmelCase__ : Tuple = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_A ) UpperCAmelCase__ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(_A ) UpperCAmelCase__ : str = model_a(**_A ) UpperCAmelCase__ : Optional[Any] = after_outputs[0] UpperCAmelCase__ : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1e-5 ) @require_flax class lowerCamelCase_ ( __a , unittest.TestCase ): def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = 
FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , ) UpperCAmelCase__ : List[str] = 13 UpperCAmelCase__ : int = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCAmelCase__ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCAmelCase__ : List[str] = random_attention_mask([batch_size, 4] ) UpperCAmelCase__ : Tuple = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def lowercase_ ( self : int , _A : Union[str, Any] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = FlaxViTModel(_A ) UpperCAmelCase__ : List[Any] = FlaxBertModel(_A ) return vision_model, text_model def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = FlaxViTModelTester(self ) UpperCAmelCase__ : List[str] = FlaxBertModelTester(self ) UpperCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase__ : Dict = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ : List[str] = vision_config_and_inputs UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowerCamelCase_ ( __a , unittest.TestCase ): def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , 
vision_from_pt=_A , text_from_pt=_A , ) UpperCAmelCase__ : List[Any] = 13 UpperCAmelCase__ : Dict = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) UpperCAmelCase__ : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) UpperCAmelCase__ : Tuple = random_attention_mask([batch_size, 4] ) UpperCAmelCase__ : Any = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def lowercase_ ( self : int , _A : Any , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = FlaxCLIPVisionModel(_A ) UpperCAmelCase__ : int = FlaxBertModel(_A ) return vision_model, text_model def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = FlaxCLIPVisionModelTester(self ) UpperCAmelCase__ : List[str] = FlaxBertModelTester(self ) UpperCAmelCase__ : Tuple = clip_model_tester.prepare_config_and_inputs() UpperCAmelCase__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = vision_config_and_inputs UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) UpperCAmelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) UpperCAmelCase__ : Optional[Any] = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) UpperCAmelCase__ : Optional[int] = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=_A , padding=_A , return_tensors='''np''' ) UpperCAmelCase__ : List[str] = model(**_A ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) UpperCAmelCase__ : Tuple = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1e-3 ) )
75
"""ESM model configuration (sequence model plus optional ESMFold folding head)."""

from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration for an ESM model.

    Bug fixed: the previous version assigned every hyper-parameter to a single
    local variable instead of instance attributes, so the configuration was
    empty after construction.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        # pad/mask token ids are handled by the base class.
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested folding config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    """Settings for the ESMFold folding head wrapped around the ESM trunk."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept either a ready TrunkConfig, a dict from deserialization, or None.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    """Settings for the folding trunk (Evoformer-style stack)."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): the two self-modulo checks below are trivially always 0
        # (x % x == 0); they look like they were meant to use the head widths.
        # Kept as-is to preserve upstream behavior — confirm intent before changing.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    """Settings for the invariant-point-attention structure module."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as an ordered tuple."""
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
92
0
"""Speech processor class for Whisper."""

from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into one processor.

    Bugs fixed versus the previous version: the two processor-class attributes
    shared one name (losing the feature-extractor declaration), the context
    state (`current_processor`, `_in_target_context_manager`) was assigned to
    dead locals, and the combined audio+text path dropped the
    ``inputs["labels"]`` assignment.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default to the feature extractor; a target-context manager would
        # switch `current_processor` to the tokenizer.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward to the tokenizer's forced decoder-prompt id helper."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Dispatch `audio` to the feature extractor and `text` to the tokenizer.

        Returns the feature-extractor output, the tokenizer output, or — when
        both are given — the features with tokenized text attached as `labels`.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # Legacy positional call: first positional argument is the audio.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        """Forward to the tokenizer's `get_prompt_ids`."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
76
"""tests directory-specific pytest settings - this file is run automatically
by pytest before any tests are run."""

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Bug fixed: the path was previously assigned to a throwaway name, so the
# `sys.path.insert` below raised NameError.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used across the test-suite."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Install the shared transformers command-line options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the extended report files when --make-reports is given."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    # Bug fixed: the override was previously assigned to a dead local instead of
    # `session.exitstatus`, so the CI still failed.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """Output checker honoring the IGNORE_RESULT doctest flag."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Bug fixed: these monkey-patches were previously assigned to unused module
# names, so none of the doctest customizations ever took effect.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
92
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = 
self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] 
= self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
"""Image processor class for Swin2SR: rescale + symmetric pad to a multiple of `pad_size`."""
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """Constructs an image processor that rescales pixel values and pads images
    so height and width are multiples of the window size expected by the model."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        """Symmetrically pad the bottom/right edges so both dimensions become multiples of `size`.

        NOTE(review): when a dimension is already a multiple of `size`, a full extra
        `size` rows/cols are added — this mirrors the original formula on purpose.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images: validate, rescale, pad, and convert channel layout.

        Returns:
            A `BatchFeature` with key "pixel_values".
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
92
0
"""Multi-GPU integration tests: each test launches a helper script with torchrun."""
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        # Resolve the helper scripts shipped alongside accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"""Found {torch.cuda.device_count()} devices.""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-runs THIS file under torchrun, exercising the __main__ block below.
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""")
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    # Distributed worker body for test_pad_across_processes: check that
    # pad_across_processes pads every rank's tensor to the largest dim-0 size.
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
78
"""Lazy-import structure for the MBart model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports; extended below per available backend.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
"""Project Euler problem 493: expected number of distinct colours when drawing
balls at random (without replacement) from an urn of 7 colours x 10 balls."""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Return the expected number of distinct colours among `taken` drawn balls.

    By linearity of expectation: for each colour, the probability it is missing
    from the draw is C(NUM_BALLS - BALLS_PER_COLOUR, taken) / C(NUM_BALLS, taken).

    Args:
        taken: number of balls drawn (0 <= taken <= NUM_BALLS).

    Returns:
        The expectation formatted to nine decimal places.
    """
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
79
"""Lazy-import structure for the ByT5 tokenizer."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Maps submodule name -> public names it exports.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
# Model subpackages re-exported so `transformers.models.<name>` resolves.
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
80
"""Finetuning models on multiple-choice tasks (legacy example script)."""

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """Fraction of predictions that match the gold labels."""
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we feed the model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            """ --overwrite_output_dir to overcome."""
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
0
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

# Fixture dimensions: the synthetic dataset is made of NUM_SHARDS shards
# holding NUM_ITEMS_PER_SHARD examples each (referenced by main() below).
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    """Raised when a rank's dataloader yields an unexpected number of examples."""
    # NOTE(review): the mangled source subclassed an undefined name; RuntimeError
    # matches the upstream script — confirm against the original repository.


def gen(shards: List[str]):
    """Yield NUM_ITEMS_PER_SHARD examples for every shard name in *shards*."""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    """Check that split_dataset_by_node distributes examples evenly across ranks.

    Intended to be launched under ``torchrun``: reads RANK / WORLD_SIZE from the
    environment and raises FailedTestError on a per-rank size mismatch.
    """
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    # NOTE(review): type=bool on argparse treats any non-empty string as True;
    # kept as-is because the launcher relies on presence/absence semantics.
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        # Materialize the iterable dataset into a regular map-style Dataset.
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    # Ranks below the remainder receive one extra example.
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
81
"""Build a FAISS-indexed knowledge dataset for RAG from a tab-separated csv of (title, text) rows."""

import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)

# Embeddings are computed for indexing only — no gradients needed anywhere.
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split *text* into chunks of up to ``n`` pieces separated by *character*."""
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]


def split_documents(documents: dict) -> dict:
    """Split each document of a batch into passages of up to 100 words.

    *documents* is a ``datasets.map``-style batch with "title" and "text" columns;
    the passage inherits its document's title ("" when the title is None).
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of one batch of passages (returned as a numpy array)."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
    """Create the passage dataset, embed it with DPR, and build a FAISS HNSW index."""
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # Load the tab-separated csv with columns "title" and "text".
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # Then split the documents into passages of 100 words.
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings.
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save the dataset.
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Use the faiss implementation of HNSW for fast approximate nearest-neighbor search.
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index.
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
92
0
"""Tests for the Stable Diffusion InstructPix2Pix pipeline (fast CPU tests + slow GPU tests)."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    # NOTE(review): the mangled source lost the base-class names; these are the
    # three mixins imported above — confirm the order against test_pipelines_common.
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack so the pipeline runs quickly on CPU."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/generator inputs for the fast tests."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # Convert the PIL fixture back into a (2, 3, 32, 32) tensor batch in [0, 1].
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded latents must produce the same output as passing the image."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Download the reference image and build deterministic pipeline inputs."""
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
82
"""Tests for the M2M100 tokenizer (unit tests on a toy vocab + slow integration tests)."""

import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

# Language-code token ids of the facebook/m2m100_418M checkpoint.
EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Tiny vocab + sample sentencepiece model saved into tmpdirname for the mixin.
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
92
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Any = inspect.getfile(accelerate.test_utils ) _lowerCamelCase : Union[str, Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 _lowerCamelCase : Optional[Any] = test_metrics @require_cpu def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" debug_launcher(self.test_metrics.main ) @require_single_gpu def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.test_metrics.main() @require_multi_gpu def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" print(f'''Found {torch.cuda.device_count()} devices.''' ) _lowerCamelCase : str = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
83
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int: try: lowercase : Any =int(__magic_name__ ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) lowercase : Optional[Any] =2 lowercase : Dict =0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 lowercase : Union[str, Any] =i while n % i == 0: lowercase : Optional[int] =n // i i += 1 return int(__magic_name__ ) if __name__ == "__main__": print(f'''{solution() = }''')
92
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''', '''Salesforce/blip-vqa-capfit-large''': ( '''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-base''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-large''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json''' ), '''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''', '''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''', '''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''', '''Salesforce/blip-itm-large-flikr''': ( '''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json''' ), } class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : List[str] = """blip_text_model""" def __init__( self , snake_case=3_0524 , snake_case=768 , snake_case=768 , snake_case=3072 , snake_case=768 , snake_case=12 , snake_case=8 , snake_case=512 , snake_case="gelu" , snake_case=1E-12 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=3_0522 , snake_case=2 , snake_case=0 , snake_case=102 , snake_case=True , snake_case=True , **snake_case , ): super().__init__( pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , sep_token_id=snake_case , **snake_case , ) lowercase = vocab_size lowercase = hidden_size lowercase = encoder_hidden_size lowercase = intermediate_size lowercase = projection_dim lowercase = 
hidden_dropout_prob lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = max_position_embeddings lowercase = layer_norm_eps lowercase = hidden_act lowercase = initializer_range lowercase = attention_probs_dropout_prob lowercase = is_decoder lowercase = use_cache @classmethod def SCREAMING_SNAKE_CASE__ ( cls , snake_case , **snake_case ): cls._set_token_in_kwargs(snake_case ) lowercase , lowercase = cls.get_config_dict(snake_case , **snake_case ) # get the text config dict if we are loading from BlipConfig if config_dict.get('model_type' ) == "blip": lowercase = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case , **snake_case ) class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : Tuple = """blip_vision_model""" def __init__( self , snake_case=768 , snake_case=3072 , snake_case=512 , snake_case=12 , snake_case=12 , snake_case=384 , snake_case=16 , snake_case="gelu" , snake_case=1E-5 , snake_case=0.0 , snake_case=1E-10 , **snake_case , ): super().__init__(**snake_case ) lowercase = hidden_size lowercase = intermediate_size lowercase = projection_dim lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = patch_size lowercase = image_size lowercase = initializer_range lowercase = attention_dropout lowercase = layer_norm_eps lowercase = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ ( cls , snake_case , **snake_case ): cls._set_token_in_kwargs(snake_case ) lowercase , lowercase = cls.get_config_dict(snake_case , **snake_case ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('model_type' ) == "blip": lowercase = config_dict['vision_config'] if 
"model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case , **snake_case ) class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : Optional[int] = """blip""" _UpperCamelCase : Tuple = True def __init__( self , snake_case=None , snake_case=None , snake_case=512 , snake_case=2.6_592 , snake_case=256 , **snake_case , ): super().__init__(**snake_case ) if text_config is None: lowercase = {} logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' ) if vision_config is None: lowercase = {} logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' ) lowercase = BlipTextConfig(**snake_case ) lowercase = BlipVisionConfig(**snake_case ) lowercase = self.vision_config.hidden_size lowercase = projection_dim lowercase = logit_scale_init_value lowercase = 1.0 lowercase = 0.02 lowercase = image_text_hidden_size @classmethod def SCREAMING_SNAKE_CASE__ ( cls , snake_case , snake_case , **snake_case ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = copy.deepcopy(self.__dict__ ) lowercase = self.text_config.to_dict() lowercase = self.vision_config.to_dict() lowercase = self.__class__.model_type return output
84
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'speech_to_text_2' lowerCamelCase_ = ['past_key_values'] lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : List[str] =vocab_size lowercase : Optional[int] =d_model lowercase : Optional[Any] =decoder_ffn_dim lowercase : Any =decoder_layers lowercase : Dict =decoder_attention_heads lowercase : List[Any] =dropout lowercase : List[Any] =attention_dropout lowercase : Any =activation_dropout lowercase : Optional[Any] =activation_function lowercase : Optional[int] =init_std lowercase : Dict =decoder_layerdrop lowercase : Optional[int] =use_cache lowercase : Optional[Any] =decoder_layers lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True lowercase : str =max_target_positions super().__init__( pad_token_id=UpperCAmelCase__ , 
bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
92
0
from __future__ import annotations import math def _a ( lowercase__ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = str(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = [n] for i in range(1 , len(lowercase__ ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def _a ( lowercase__ : int ): '''simple docstring''' if len(str(lowercase__ ) ) > 3: if not is_prime(int(str(lowercase__ )[-3:] ) ) or not is_prime(int(str(lowercase__ )[:3] ) ): return False return True def _a ( lowercase__ : int = 11 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : list[int] = [] SCREAMING_SNAKE_CASE__ : Optional[Any] = 13 while len(lowercase__ ) != count: if validate(lowercase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] = list_truncated_nums(lowercase__ ) if all(is_prime(lowercase__ ) for i in list_nums ): list_truncated_primes.append(lowercase__ ) num += 2 return list_truncated_primes def _a ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(F"""{sum(compute_truncated_primes(11)) = }""")
85
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ): '''simple docstring''' lowercase : List[Any] =parent lowercase : Tuple =batch_size lowercase : List[str] =image_size lowercase : List[Any] =num_channels lowercase : Union[str, Any] =num_stages lowercase : int =hidden_sizes lowercase : Any =depths lowercase : Tuple =is_training lowercase : str =use_labels lowercase : List[Any] =intermediate_size lowercase : int =hidden_act lowercase : 
Union[str, Any] =num_labels lowercase : Optional[int] =initializer_range lowercase : int =out_features lowercase : List[str] =out_indices lowercase : str =scope def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Dict =None if self.use_labels: lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels ) lowercase : Dict =self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Any ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : 
Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[int] =model(UpperCAmelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase : Optional[Any] =None lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Any ={'''pixel_values''': pixel_values} return config, inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : List[str] =config_and_inputs lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase_ = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if 
is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =ConvNextVaModelTester(self ) lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Any ): '''simple docstring''' return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : Optional[int] =True if model_class.__name__ in [ *get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ ), ]: continue lowercase : Dict =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : List[Any] 
=model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : List[Any] =False lowercase : Any =True if ( model_class.__name__ in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )] or not model_class.supports_gradient_checkpointing ): continue lowercase : Any =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : int =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict =model_class(UpperCAmelCase__ ) lowercase : Union[str, Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : int =[*signature.parameters.keys()] lowercase : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ): lowercase : int =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder 
else outputs.hidden_states lowercase : List[Any] =self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : List[str] =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Tuple =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowerCAmelCase ( ) -> List[Any]: lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ ) lowercase : int =self.default_image_processor lowercase : List[str] =prepare_img() 
lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): lowercase : Dict =model(**UpperCAmelCase__ ) # verify the logits lowercase : Optional[Any] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :Any = { 'configuration_clap': [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapAudioConfig', 'ClapConfig', 'ClapTextConfig', ], 'processing_clap': ['ClapProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapModel', 'ClapPreTrainedModel', 'ClapTextModel', 'ClapTextModelWithProjection', 'ClapAudioModel', 'ClapAudioModelWithProjection', ] __a :List[Any] = ['ClapFeatureExtractor'] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
86
'''simple docstring''' import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels UpperCamelCase_ = object() # For specifying empty leaf dict `{}` UpperCamelCase_ = object() def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]: lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ): lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )] if matches and all(__magic_name__ ): return True return False def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[str]: def replace(__magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ): for rule, replacement in rules: if _match(__magic_name__ , __magic_name__ ): return replacement return val return replace def _lowerCAmelCase ( ) -> int: return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )), (("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _lowerCAmelCase ( __magic_name__ : str ) -> int: lowercase : int =_get_partition_rules() lowercase : Tuple =_replacement_rules(__magic_name__ ) lowercase : Any ={k: _unmatched for k in flatten_dict(__magic_name__ )} lowercase : Any ={k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()} assert 
_unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__magic_name__ ) )
92
0
"""DistilBERT model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}

# Backwards-compatible alias: the previous (name-mangled) file bound its last
# module-level assignment (this map) to this name.
_lowerCamelCase = DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP


class DistilBertConfig(PretrainedConfig):
    """Configuration for a DistilBERT model; defaults mirror distilbert-base-uncased."""

    # model_type / attribute_map are read by PretrainedConfig machinery.
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for DistilBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )


# Backwards-compatible alias: the previous (name-mangled) file gave both classes
# this name; the last binding (the ONNX config) wins.
UpperCamelCase_ = DistilBertOnnxConfig
87
"""
Even-tree problem: given a tree with an even number of vertices, count the
maximum number of edges that can be removed so that every remaining connected
component has an even number of vertices.
"""
from collections import defaultdict


def dfs(start):
    """Return the size of the subtree rooted at *start*; record even subtrees.

    Relies on module-level ``tree`` (adjacency lists), ``visited`` and ``cuts``
    being initialised by the caller (see the ``__main__`` block below).
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # An even-sized subtree can be cut off from its parent edge.
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root (vertex 1); cuttable vertices accumulate in ``cuts``."""
    dfs(1)


# Backwards-compatible alias: the previous (name-mangled) file bound its last
# def to this name.
_lowerCAmelCase = even_tree


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # Number of removable edges = number of even subtrees minus the root itself.
    print(len(cuts) - 1)
92
0
"""simple docstring""" import math def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : int = 0 while num > 0: _lowerCamelCase : Any = num % 8 _lowerCamelCase : int = octal + (remainder * math.floor(math.pow(10 , __snake_case ) )) counter += 1 _lowerCamelCase : str = math.floor(num / 8 ) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. return F'0o{int(__snake_case )}' def _snake_case ( ): """simple docstring""" print("""\n2 in octal is:""" ) print(decimal_to_octal(2 ) ) # = 2 print("""\n8 in octal is:""" ) print(decimal_to_octal(8 ) ) # = 10 print("""\n65 in octal is:""" ) print(decimal_to_octal(65 ) ) # = 101 print("""\n216 in octal is:""" ) print(decimal_to_octal(216 ) ) # = 330 print("""\n512 in octal is:""" ) print(decimal_to_octal(512 ) ) # = 1000 print("""\n""" ) if __name__ == "__main__": main()
88
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict: lowercase : List[str] =R'''\w+[.]\d+''' lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ ) for pat in pats: lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) ) return key def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str: lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase : str =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowercase : Optional[Any] =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase : 
Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]: # Step 1: Convert pytorch tensor to numpy lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) ) lowercase : Dict =flatten_dict(__magic_name__ ) lowercase : Dict ={} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase : Dict =rename_key(__magic_name__ ) lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowercase : Tuple =jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ )
92
0
def UpperCamelCase_(lowerCamelCase_) -> int:
    """Return the 1-based spreadsheet column number for a column title.

    'A' -> 1, 'Z' -> 26, 'AA' -> 27, 'AB' -> 28.

    Raises:
        ValueError: if the title is not made entirely of uppercase letters A-Z.
    """
    # The original asserted on an undefined name (`column_title`); validate the
    # actual parameter explicitly instead of using `assert` (stripped under -O).
    if not (lowerCamelCase_.isalpha() and lowerCamelCase_.isupper()):
        raise ValueError("column title must consist of uppercase letters A-Z")
    answer = 0
    for letter in lowerCamelCase_:
        # Base-26 accumulation: each letter contributes its ordinal (A=1 ... Z=26).
        answer = answer * 26 + (ord(letter) - 64)
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
89
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. 
lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. 
if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
"""Project Euler problem 13: first ten digits of the sum of the numbers in num.txt."""
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in ``num.txt``.

    ``num.txt`` is expected to sit next to this module, one integer per line.
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


# Backwards-compatible alias: the previous (name-mangled) file bound this def
# to `_snake_case`; the __main__ guard below calls `solution`.
_snake_case = solution


if __name__ == "__main__":
    print(solution())
90
"""XNLI benchmark metric (simple accuracy)."""
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis
                 and Rinott, Ruty
                 and Lample, Guillaume
                 and Williams, Adina
                 and Bowman, Samuel R.
                 and Schwenk, Holger
                 and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods
               in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (expects numpy arrays — `format='numpy'`)."""
    return (preds == labels).mean()


# Backwards-compatible aliases for the previous (name-mangled) public names.
_lowerCAmelCase = simple_accuracy
UpperCamelCase_ = _KWARGS_DESCRIPTION


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        """Metric metadata; the `datasets` library calls this hook by name."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Compute accuracy; the `datasets` library calls this hook by name."""
        return {"accuracy": simple_accuracy(predictions, references)}


# Backwards-compatible alias for the previous (name-mangled) class name.
__SCREAMING_SNAKE_CASE = Xnli
92
0
"""simple docstring""" import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def _snake_case ( snake_case__ : Tuple ): return EnvironmentCommand() def _snake_case ( snake_case__ : List[str] ): return EnvironmentCommand(args.accelerate_config_file ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any: A = parser.add_parser('env' ) download_parser.set_defaults(func=A_ ) download_parser.add_argument( '--accelerate-config_file' ,default=A_ ,help='The accelerate config file to use for the default values in the launching script.' ,) download_parser.set_defaults(func=A_ ) def __init__( self : List[Any] ,A_ : Optional[int] ,*A_ : Tuple ) -> None: A = accelerate_config_file def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = 'not installed' if is_safetensors_available(): import safetensors A = safetensors.__version__ elif importlib.util.find_spec('safetensors' ) is not None: import safetensors A = F'{safetensors.__version__} but is ignored because of PyTorch version too old.' A = 'not installed' A = A = 'not found' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file A = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(A_ ): A = load_config_from_file(self._accelerate_config_file ).to_dict() A = ( '\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] ) if isinstance(A_ ,A_ ) else F'\t{accelerate_config}' ) A = 'not installed' A = 'NA' if is_torch_available(): import torch A = torch.__version__ A = torch.cuda.is_available() A = 'not installed' A = 'NA' if is_tf_available(): import tensorflow as tf A = tf.__version__ try: # deprecated in v2.1 A = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool A = bool(tf.config.list_physical_devices('GPU' ) ) A = 'not installed' A = 'not installed' A = 'not installed' A = 'NA' if is_flax_available(): import flax import jax import jaxlib A = flax.__version__ A = jax.__version__ A = jaxlib.__version__ A = jax.lib.xla_bridge.get_backend().platform A = { '`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': F'{safetensors_version}', 'Accelerate version': F'{accelerate_version}', 'Accelerate config': F'{accelerate_config_str}', 'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})', 'Tensorflow version (GPU?)': F'{tf_version} ({tf_cuda_available})', 'Flax version (CPU?/GPU?/TPU?)': F'{flax_version} ({jax_backend})', 'Jax version': F'{jax_version}', 'JaxLib version': F'{jaxlib_version}', 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(A_ ) ) return info @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Union[str, Any] ) -> Dict: return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
91
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , 
n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ ).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() 
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : 
int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] 
=model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :int = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' ) lowerCAmelCase__ :int = [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] return object_detector, examples def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = object_detector(examples[0] , threshold=0.0 ) lowerCAmelCase__ :Optional[Any] = len(__UpperCAmelCase ) self.assertGreater(__UpperCAmelCase , 0 ) self.assertEqual( __UpperCAmelCase , [ { 'score': ANY(__UpperCAmelCase ), 'label': ANY(__UpperCAmelCase ), 'box': {'xmin': ANY(__UpperCAmelCase ), 'ymin': ANY(__UpperCAmelCase ), 'xmax': ANY(__UpperCAmelCase ), 'ymax': ANY(__UpperCAmelCase )}, } for i in range(__UpperCAmelCase ) ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' ) 
lowerCAmelCase__ :str = object_detector( './tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}}, {'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}}, {'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}}, ] , ) lowerCAmelCase__ :Any = object_detector( [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}}, {'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.66_14, 
'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}}, {'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}}, {'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}}, {'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}}, ] ] , ) @require_torch @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = pipeline('zero-shot-object-detection' ) lowerCAmelCase__ :str = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, {'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}}, {'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}}, {'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}}, ] , ) lowerCAmelCase__ :Any = object_detector( [ { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, ] , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, {'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}}, {'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 
3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}}, {'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}}, ], [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, {'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}}, {'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}}, {'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}}, ], ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass @require_torch @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = 0.2 lowerCAmelCase__ :Optional[Any] = pipeline('zero-shot-object-detection' ) lowerCAmelCase__ :Optional[Any] = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, {'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}}, ] , ) @require_torch @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = 2 lowerCAmelCase__ :Tuple = pipeline('zero-shot-object-detection' ) lowerCAmelCase__ :str = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=__UpperCAmelCase , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'score': 0.28_68, 'label': 'cat', 
'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}}, {'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}}, ] , )
93
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ): '''simple docstring''' lowercase : str =parent lowercase : int =batch_size lowercase : Any =seq_length lowercase : int =is_training lowercase : str =use_input_mask lowercase : int =use_token_type_ids lowercase : Dict =use_labels lowercase : int =vocab_size lowercase : str =embedding_size lowercase : Union[str, Any] =hidden_size lowercase : Tuple 
=num_hidden_layers lowercase : Any =num_hidden_groups lowercase : Union[str, Any] =num_attention_heads lowercase : Any =intermediate_size lowercase : Tuple =hidden_act lowercase : Optional[int] =hidden_dropout_prob lowercase : Union[str, Any] =attention_probs_dropout_prob lowercase : List[Any] =max_position_embeddings lowercase : int =type_vocab_size lowercase : int =type_sequence_label_size lowercase : Any =initializer_range lowercase : List[Any] =num_labels lowercase : int =num_choices lowercase : Optional[int] =scope def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[int] =None if self.use_input_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Dict =None if self.use_token_type_ids: lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Tuple =None lowercase : Any =None lowercase : Dict =None if self.use_labels: lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Any =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , 
num_hidden_groups=self.num_hidden_groups , ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =AlbertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : int =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , 
attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] =self.num_labels lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : List[Any] =self.num_labels lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() 
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[int] =self.num_choices lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Union[str, Any] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Dict =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': AlbertModel, 'fill-mask': 
AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ = True def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ): '''simple docstring''' lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): lowercase : Any =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) lowercase : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =AlbertModelTester(self ) lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def 
lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Tuple =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' ) lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] lowercase : int =torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : Union[str, Any] =torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def lowercase_ ( __A : Tuple ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(__A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__A , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__A ): return [[videos]] raise ValueError(F'Could not make batched video from {videos}' ) class UpperCAmelCase_ ( __A ): """simple docstring""" UpperCamelCase_ = ['''pixel_values'''] def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : Tuple , ) -> None: '''simple docstring''' super().__init__(**UpperCAmelCase ) lowercase : Tuple =size if size is not None else {'''shortest_edge''': 224} lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) lowercase : List[str] =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase : List[Any] 
=get_size_dict(UpperCAmelCase , param_name='''crop_size''' ) lowercase : Tuple =do_resize lowercase : Any =size lowercase : Optional[Any] =do_center_crop lowercase : str =crop_size lowercase : Any =resample lowercase : List[Any] =do_rescale lowercase : Dict =rescale_factor lowercase : List[str] =do_normalize lowercase : Optional[int] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase : List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[str] , ) -> np.ndarray: '''simple docstring''' lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) if "shortest_edge" in size: lowercase : int =get_resize_output_image_size(UpperCAmelCase , size['''shortest_edge'''] , default_to_square=UpperCAmelCase ) elif "height" in size and "width" in size: lowercase : str =(size['''height'''], size['''width''']) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' ) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def A__ ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray: '''simple docstring''' lowercase : List[str] =get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}' ) return center_crop(UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase , **UpperCAmelCase ) def A__ ( self : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Tuple , ) -> List[Any]: '''simple docstring''' return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def A__ ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def A__ ( self : int , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations 
expect numpy arrays. lowercase : Dict =to_numpy_array(UpperCAmelCase ) if do_resize: lowercase : Union[str, Any] =self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) if do_center_crop: lowercase : Optional[Any] =self.center_crop(UpperCAmelCase , size=UpperCAmelCase ) if do_rescale: lowercase : Dict =self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) if do_normalize: lowercase : int =self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) lowercase : Optional[Any] =to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) return image def A__ ( self : List[str] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image: '''simple docstring''' lowercase : Any =do_resize if do_resize is not None else self.do_resize lowercase : Union[str, Any] =resample if resample is not None else self.resample lowercase : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale lowercase : str =rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : List[str] =do_normalize if do_normalize is not None else self.do_normalize lowercase : List[str] =image_mean if image_mean is not None else self.image_mean lowercase : Optional[int] =image_std if image_std is not None else self.image_std lowercase : Optional[Any] =size if size is not None else 
self.size lowercase : Any =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) lowercase : Union[str, Any] =crop_size if crop_size is not None else self.crop_size lowercase : Optional[int] =get_size_dict(UpperCAmelCase , param_name='''crop_size''' ) if not valid_images(UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) lowercase : List[str] =make_batched(UpperCAmelCase ) lowercase : Union[str, Any] =[ [ self._preprocess_image( image=UpperCAmelCase , do_resize=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , do_center_crop=UpperCAmelCase , crop_size=UpperCAmelCase , do_rescale=UpperCAmelCase , rescale_factor=UpperCAmelCase , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , data_format=UpperCAmelCase , ) for img in video ] for video in videos ] lowercase : Dict ={'''pixel_values''': videos} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
94
'''simple docstring''' import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ): '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError('''Destination width/height should be > 0''' ) lowercase : Union[str, Any] =img lowercase : Union[str, Any] =img.shape[1] lowercase : str =img.shape[0] lowercase : Union[str, Any] =dst_width lowercase : str =dst_height lowercase : str =self.src_w / self.dst_w lowercase : Optional[Any] =self.src_h / self.dst_h lowercase : int =( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): lowercase : List[Any] =self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )] def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_x * x ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": UpperCamelCase_ , UpperCamelCase_ = 800, 600 UpperCamelCase_ = imread("""image_data/lena.jpg""", 1) UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
92
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class UpperCamelCase_ : def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , ) -> Any: UpperCAmelCase_ : Optional[int] = parent UpperCAmelCase_ : str = 13 UpperCAmelCase_ : Dict = 7 UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Optional[Any] = 99 UpperCAmelCase_ : Optional[int] = 32 UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Optional[Any] = 4 UpperCAmelCase_ : int = 37 UpperCAmelCase_ : int = "gelu" UpperCAmelCase_ : Any = 0.1 UpperCAmelCase_ : Any = 0.1 UpperCAmelCase_ : List[Any] = 512 UpperCAmelCase_ : List[str] = 16 UpperCAmelCase_ : str = 2 UpperCAmelCase_ : List[str] = 0.0_2 UpperCAmelCase_ : int = 3 UpperCAmelCase_ : List[str] = 4 UpperCAmelCase_ : Optional[Any] = None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Any = None if self.use_input_mask: UpperCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : str = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Any = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[str] = self.prepare_config_and_inputs() UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> Tuple: UpperCAmelCase_ : Tuple = TFEsmModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : Any = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase_ : Any = model(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [input_ids, input_mask] UpperCAmelCase_ : Dict = model(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Optional[int] = TFEsmModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = [input_ids, input_mask] UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ) # Also check the case where encoder outputs are not passed UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int ) -> Dict: UpperCAmelCase_ : Dict = TFEsmForMaskedLM(config=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = TFEsmForTokenClassification(config=lowerCAmelCase_ ) UpperCAmelCase_ : str = {"input_ids": input_ids, "attention_mask": input_mask} 
UpperCAmelCase_ : int = model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: UpperCAmelCase_ : int = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Any = TFEsmModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ 
) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = TFEsmModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @unittest.skip("Protein models do not support embedding resizing." ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: pass @unittest.skip("Protein models do not support embedding resizing." ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: pass def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer UpperCAmelCase_ : Tuple = model.get_bias() assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for k, v in name.items(): assert isinstance(lowerCAmelCase_ , tf.Variable ) else: UpperCAmelCase_ : Optional[Any] = model.get_output_embeddings() assert x is None UpperCAmelCase_ : List[Any] = model.get_bias() assert name is None @require_tf class UpperCamelCase_ (unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Any = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ : int = model(lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[Any] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , lowerCAmelCase_ ) # compare the actual 
values for a slice. UpperCAmelCase_ : int = tf.constant( [ [ [8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7], [-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5], [-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> int: UpperCAmelCase_ : Tuple = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase_ : List[Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Optional[Any] = tf.constant( [ [ [0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9], [0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2], [0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
95
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Any =0.0_0 lowercase : Tuple =0 for resistor in resistors: if resistor <= 0: lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!''' raise ValueError(__magic_name__ ) first_sum += 1 / float(__magic_name__ ) index += 1 return 1 / first_sum def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Optional[Any] =0.0_0 lowercase : int =0 for resistor in resistors: sum_r += resistor if resistor < 0: lowercase : Tuple =f'''Resistor at index {index} has a negative value!''' raise ValueError(__magic_name__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
92
0
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels __lowerCamelCase = object() # For specifying empty leaf dict `{}` __lowerCamelCase = object() def a ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple ) -> int: __magic_name__: str = tuple((re.compile(x + """$""" ) for x in qs) ) for i in range(len(__UpperCAmelCase ) - len(__UpperCAmelCase ) + 1 ): __magic_name__: Dict = [x.match(__UpperCAmelCase ) for x, y in zip(__UpperCAmelCase , ks[i:] )] if matches and all(__UpperCAmelCase ): return True return False def a ( __UpperCAmelCase : Dict ) -> Optional[Any]: def replace(__UpperCAmelCase : Any , __UpperCAmelCase : List[str] ): for rule, replacement in rules: if _match(__UpperCAmelCase , __UpperCAmelCase ): return replacement return val return replace def a ( ) -> str: return [ # embeddings (("transformer", "wpe", "embedding"), P("""mp""" , __UpperCAmelCase )), (("transformer", "wte", "embedding"), P("""mp""" , __UpperCAmelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__UpperCAmelCase , """mp""" )), (("attention", "out_proj", "kernel"), P("""mp""" , __UpperCAmelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__UpperCAmelCase , """mp""" )), (("mlp", "c_fc", "bias"), P("""mp""" )), (("mlp", "c_proj", "kernel"), P("""mp""" , __UpperCAmelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def a ( __UpperCAmelCase : Union[str, Any] ) -> Dict: __magic_name__: Optional[Any] = _get_partition_rules() __magic_name__: Any = _replacement_rules(__UpperCAmelCase ) __magic_name__: int = {k: _unmatched for k in flatten_dict(__UpperCAmelCase )} __magic_name__: int = {k: replace(__UpperCAmelCase , __UpperCAmelCase ) for k, v in initd.items()} assert 
_unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__UpperCAmelCase ) )
96
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": 
"""encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def 
_lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized 
from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple 
=WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
92
0
from __future__ import annotations __a = '#' class lowercase__: """simple docstring""" def __init__( self : Dict ) -> None: lowercase_ = {} def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : str ) -> None: lowercase_ = self._trie for char in text: if char not in trie: lowercase_ = {} lowercase_ = trie[char] lowercase_ = True def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : str ) -> tuple | list: lowercase_ = self._trie for char in prefix: if char in trie: lowercase_ = trie[char] else: return [] return self._elements(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : dict ) -> tuple: lowercase_ = [] for c, v in d.items(): lowercase_ = [''' '''] if c == END else [(c + s) for s in self._elements(SCREAMING_SNAKE_CASE_ )] result.extend(SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) __a = Trie() __a = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal') for word in words: trie.insert_word(word) def a ( snake_case__: str ): '''simple docstring''' lowercase_ = trie.find_word(snake_case__ ) return tuple(string + word for word in suffixes ) def a ( ): '''simple docstring''' print(autocomplete_using_trie('''de''' ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
97
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray: assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even''' lowercase : int =float(embedding_dim // 2 ) lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment ) lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 ) # scale embeddings lowercase : Tuple =scale * emb if flip_sin_to_cos: lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 ) else: lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 ) lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] ) return signal class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = jnp.floataa @nn.compact def __call__( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ ) lowercase : Any =nn.silu(UpperCAmelCase__ ) lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ ) return temb class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = False lowerCamelCase_ = 1 @nn.compact def __call__( self : int , UpperCAmelCase__ : str ): '''simple docstring''' return get_sinusoidal_embeddings( UpperCAmelCase__ , embedding_dim=self.dim , 
flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
92
0
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : int = 0 _snake_case : bool = False _snake_case : float = 3.0 class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} ) self.assertDictEqual(MockClass(a=2 , b=lowerCAmelCase__ ).to_kwargs() , {'''a''': 2, '''b''': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} ) @require_cuda def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _UpperCamelCase = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _UpperCamelCase = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , lowerCAmelCase__ ) @require_multi_gpu def snake_case__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() ) if __name__ == "__main__": lowercase__ : Dict = 
DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowercase__ : List[str] = Accelerator(kwargs_handlers=[ddp_scaler]) lowercase__ : List[Any] = torch.nn.Linear(1_00, 2_00) lowercase__ : Dict = accelerator.prepare(model) # Check the values changed in kwargs lowercase__ : Any = '' lowercase__ : Union[str, Any] = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
98
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO Update this UpperCamelCase_ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'esm' def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =vocab_size lowercase : List[Any] =hidden_size lowercase : Any =num_hidden_layers lowercase : Optional[Any] =num_attention_heads lowercase : Tuple =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Optional[int] =max_position_embeddings lowercase : Union[str, Any] =initializer_range lowercase : Tuple =layer_norm_eps lowercase : Union[str, Any] =position_embedding_type lowercase : List[Any] =use_cache lowercase : Dict =emb_layer_norm_before lowercase : Optional[Any] =token_dropout lowercase : Union[str, Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No 
esmfold_config supplied for folding model, using default values.''' ) lowercase : Any =EsmFoldConfig() elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ ) lowercase : Union[str, Any] =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowercase : int =get_default_vocab_list() else: lowercase : Tuple =vocab_list else: lowercase : Union[str, Any] =None lowercase : Dict =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase__ ): lowercase : Optional[Any] =self.esmfold_config.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = None lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 0 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.trunk is None: lowercase : str =TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase__ ): lowercase : int =TrunkConfig(**self.trunk ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =asdict(self ) lowercase : Union[str, Any] =self.trunk.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 48 lowerCamelCase_ = 10_24 lowerCamelCase_ = 1_28 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = False lowerCamelCase_ = 4 lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' if 
self.structure_module is None: lowercase : Any =StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase__ ): lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase : str =self.sequence_state_dim // self.sequence_head_width lowercase : int =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =asdict(self ) lowercase : Any =self.structure_module.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 3_84 lowerCamelCase_ = 1_28 lowerCamelCase_ = 16 lowerCamelCase_ = 
1_28 lowerCamelCase_ = 12 lowerCamelCase_ = 4 lowerCamelCase_ = 8 lowerCamelCase_ = 0.1 lowerCamelCase_ = 8 lowerCamelCase_ = 1 lowerCamelCase_ = 2 lowerCamelCase_ = 7 lowerCamelCase_ = 10 lowerCamelCase_ = 1E-8 lowerCamelCase_ = 1E5 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return asdict(self ) def _lowerCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
92
0
import requests
from bs4 import BeautifulSoup  # was `from bsa import ...` — typo for the bs4 package


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation-count link text for a Google Scholar lookup.

    Args:
        base_url: the scholar lookup endpoint.
        params: query parameters identifying the publication.

    Returns:
        The text of the third anchor in the result footer (the "Cited by N" link).
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, """html.parser""")
    div = soup.find("""div""", attrs={"""class""": """gs_ri"""})
    anchors = div.find("""div""", attrs={"""class""": """gs_fl"""}).find_all("""a""")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 3_0,
        'pages': '3979-3990',
        'year': 2_0_1_8,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
99
"""Directory-specific pytest settings for the test suite."""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used by the test suite."""
    config.addinivalue_line(
        '''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested'''
    )
    config.addinivalue_line(
        '''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested'''
    )
    config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''')
    config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''')
    config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''')
    config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''')


def pytest_addoption(parser):
    """Forward option registration to the shared transformers helper."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the report files when --make-reports was requested."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """OutputChecker that treats IGNORE_RESULT-flagged examples as always passing."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Install the custom checker/parser so doctests pick them up.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
92
0
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class __snake_case(BertTokenizerFast):  # base was an undefined placeholder name; the import shows BertTokenizerFast was intended
    """Fast tokenizer whose matching slow implementation is :class:`CustomTokenizer`."""

    # transformers resolves the paired slow tokenizer through this attribute name.
    slow_tokenizer_class = CustomTokenizer
100
"""Image processor: rescale + symmetric pad to a window-size multiple."""
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """Constructs an image processor that optionally rescales pixel values and pads
    each image (bottom/right, symmetric mode) up to the next multiple of ``pad_size``.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        """Store the default preprocessing switches; see class docstring."""
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (thin wrapper over the shared transform)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetric-pad the bottom/right edges so both dims become multiples of ``size``.

        NOTE(review): when a dimension is already a multiple of ``size`` this still adds a
        full extra ``size`` of padding — kept as-is to preserve behavior.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch: to-numpy, optional rescale, optional pad,
        then convert to ``data_format`` and wrap in a :class:`BatchFeature`.

        Per-call arguments override the instance defaults set in ``__init__``.

        Raises:
            ValueError: if ``images`` is not a supported image type, or rescaling was
                requested without a factor.
        """
        # Resolve per-call overrides against the instance defaults.
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        return BatchFeature(data={'''pixel_values''': images}, tensor_type=return_tensors)
92
0