Dataset schema (one row = one sample; fields appear in this order below):

    code                      stringlengths   81 .. 54k
    code_codestyle            int64           0 .. 721
    style_context             stringlengths   91 .. 41.9k
    style_context_codestyle   int64           0 .. 699
    label                     int64           0 .. 1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 83
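The blob above is the `encoder_decoder` package `__init__`: it declares an import structure, probes optional backends, and hands everything to `_LazyModule` so framework code is imported only on first attribute access. A minimal sketch of the same deferred-import idea using PEP 562 module `__getattr__`, with illustrative names, meant to live in a package `__init__.py`:

import importlib
from typing import TYPE_CHECKING

_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

if TYPE_CHECKING:
    # Visible to static type checkers, never executed at runtime.
    from .configuration_encoder_decoder import EncoderDecoderConfig
else:
    def __getattr__(name):
        # Import the submodule only when one of its names is first accessed.
        for module_name, names in _import_structure.items():
            if name in names:
                module = importlib.import_module(f".{module_name}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")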
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = relative_attention _A = position_biased_input _A = pos_att_type _A = scope def UpperCAmelCase ( self ) -> Dict: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self ) -> Optional[int]: return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _A = DebertaVaModel(config=lowerCAmelCase_ ) 
model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _A = DebertaVaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _A = self.num_labels _A = DebertaVaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = self.num_labels _A = DebertaVaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: _A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( __lowerCAmelCase , __lowerCAmelCase 
, unittest.TestCase ): """simple docstring""" lowerCamelCase :int = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase :str = ( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase :str = True lowerCamelCase :Union[str, Any] = False lowerCamelCase :Optional[int] = False lowerCamelCase :List[str] = False lowerCamelCase :str = False def UpperCAmelCase ( self ) -> Optional[int]: _A = DebertaVaModelTester(self ) _A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> List[str]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> int: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ ) @slow def UpperCAmelCase ( self ) -> Any: for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DebertaVaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase ( self ) -> int: pass @slow def UpperCAmelCase ( self ) -> Optional[Any]: _A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" ) _A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) _A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. _A = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
style_context_codestyle: 83
label: 1
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build and measure a GHZ (maximally entangled) state on `qubits` qubits."""
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, qubits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate to entangle each qubit with the previous one
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits.
    # Measuring any one qubit collapses the others into the same state.
    circuit.measure(list(range(qubits)), list(range(qubits)))

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
code_codestyle: 83
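With qiskit and the Aer simulator installed, the GHZ circuit above should split its 1,000 shots between the all-zeros and all-ones bitstrings only; a small sanity check built on the function above:

counts = quantum_entanglement(3)
# Entangled qubits collapse together: only '000' and '111' should appear.
assert set(counts) <= {"000", "111"}
assert sum(counts.values()) == 1_000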
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR returns 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |    {nor_gate(0, 0)}   |")
    print(f"|    0    |    1    |    {nor_gate(0, 1)}   |")
    print(f"|    1    |    0    |    {nor_gate(1, 0)}   |")
    print(f"|    1    |    1    |    {nor_gate(1, 1)}   |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
style_context_codestyle: 83
label: 1
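NOR is functionally complete, so the gate above suffices to build the others; a quick illustration with hypothetical helper names:

def not_gate(a: int) -> int:
    # NOT(a) = NOR(a, a)
    return nor_gate(a, a)

def or_gate(a: int, b: int) -> int:
    # OR(a, b) = NOT(NOR(a, b))
    return not_gate(nor_gate(a, b))

def and_gate(a: int, b: int) -> int:
    # AND(a, b) = NOR(NOT(a), NOT(b))
    return nor_gate(not_gate(a), not_gate(b))

assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]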
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) @add_end_docstrings( __lowerCAmelCase , r''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''' , ) class a ( __lowerCAmelCase ): """simple docstring""" def UpperCAmelCase ( self , lowerCAmelCase_ ) -> np.ndarray: if self.framework == "tf": _A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _A = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase_ ) else: raise ValueError("""Unsupported framework""" ) return masked_index def UpperCAmelCase ( self , lowerCAmelCase_ ) -> np.ndarray: _A = self.get_masked_index(lowerCAmelCase_ ) _A = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Dict[str, GenericTensor]: if return_tensors is None: _A = self.framework _A = self.tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) self.ensure_exactly_one_mask_token(lowerCAmelCase_ ) return model_inputs def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _A = self.model(**lowerCAmelCase_ ) _A = model_inputs["""input_ids"""] return model_outputs def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 , lowerCAmelCase_=None ) -> Optional[int]: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _A = target_ids.shape[0] _A = model_outputs["""input_ids"""][0] _A = model_outputs["""logits"""] if self.framework == "tf": _A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _A = outputs.numpy() _A = outputs[0, masked_index, :] _A = stable_softmax(lowerCAmelCase_ , axis=-1 ) if target_ids is not None: _A = tf.gather_nd(tf.squeeze(lowerCAmelCase_ , 0 ) , target_ids.reshape(-1 , 1 ) ) _A = tf.expand_dims(lowerCAmelCase_ , 0 ) _A = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ ) _A , _A = topk.values.numpy(), topk.indices.numpy() else: _A = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase_ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _A = outputs[0, masked_index, :] _A = logits.softmax(dim=-1 ) if target_ids is not None: _A = probs[..., target_ids] _A , _A = probs.topk(lowerCAmelCase_ ) _A = [] _A = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _A = [] for v, p in zip(_values , 
_predictions ): # Copy is important since we're going to modify this array in place _A = input_ids.numpy().copy() if target_ids is not None: _A = target_ids[p].tolist() _A = p # Filter padding out: _A = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _A = self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) _A = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence} row.append(lowerCAmelCase_ ) result.append(lowerCAmelCase_ ) if single_mask: return result[0] return result def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Optional[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = [targets] try: _A = self.tokenizer.get_vocab() except Exception: _A = {} _A = [] for target in targets: _A = vocab.get(lowerCAmelCase_ , lowerCAmelCase_ ) if id_ is None: _A = self.tokenizer( lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , max_length=1 , truncation=lowerCAmelCase_ , )["""input_ids"""] if len(lowerCAmelCase_ ) == 0: logger.warning( F'''The specified target token `{target}` does not exist in the model vocabulary. ''' """We cannot replace it with anything meaningful, ignoring it""" ) continue _A = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F'''The specified target token `{target}` does not exist in the model vocabulary. ''' F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' ) target_ids.append(id_ ) _A = list(set(lowerCAmelCase_ ) ) if len(lowerCAmelCase_ ) == 0: raise ValueError("""At least one target must be provided when passed.""" ) _A = np.array(lowerCAmelCase_ ) return target_ids def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Union[str, Any]: _A = {} if targets is not None: _A = self.get_target_ids(lowerCAmelCase_ , lowerCAmelCase_ ) _A = target_ids if top_k is not None: _A = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" ) return {}, {}, postprocess_params def __call__( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict: _A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) == 1: return outputs[0] return outputs
code_codestyle: 83
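The class above implements the internals of the fill-mask pipeline (preprocess tokenizes and checks for a mask token; postprocess takes top-k softmax scores, optionally restricted to targets). From the user side it is reached through `pipeline()`; a short usage sketch with an illustrative checkpoint:

from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")
for pred in unmasker("Paris is the [MASK] of France.", top_k=3):
    # Each prediction carries a score, the token id, its string, and the
    # fully decoded sequence, matching the postprocess step above.
    print(pred["score"], pred["token_str"], pred["sequence"])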
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str: _A = """bilinear""" _A = max_size _A = short_edge_length def __call__( self , lowerCAmelCase_ ) -> Optional[Any]: _A = [] for img in imgs: _A , _A = img.shape[:2] # later: provide list and randomly choose index for resize _A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img _A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ ) if h < w: _A , _A = size, scale * w else: _A , _A = scale * h, size if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size: _A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ ) _A = newh * scale _A = neww * scale _A = int(neww + 0.5 ) _A = int(newh + 0.5 ) if img.dtype == np.uinta: _A = Image.fromarray(lowerCAmelCase_ ) _A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) _A = np.asarray(lowerCAmelCase_ ) else: _A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw _A = nn.functional.interpolate( lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 ) img_augs.append(lowerCAmelCase_ ) return img_augs class a : """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> List[Any]: _A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) _A = cfg.INPUT.FORMAT _A = cfg.SIZE_DIVISIBILITY _A = cfg.PAD_VALUE _A = cfg.INPUT.MAX_SIZE_TEST _A = cfg.MODEL.DEVICE _A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) ) _A = [im.shape[-2:] for im in images] _A = [ nn.functional.pad( lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ] return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int: with torch.no_grad(): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = [images] if single_image: assert len(lowerCAmelCase_ ) == 1 for i in range(len(lowerCAmelCase_ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge _A = torch.tensor([im.shape[:2] for im in images] ) _A = self.aug(lowerCAmelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic _A = [self.normalizer(lowerCAmelCase_ ) for x in images] # now pad them to do the following operations _A , _A = self.pad(lowerCAmelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad _A = 
torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]: assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!" _A , _A = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__) tensor[:, 1].clamp_(min=0 , max=snake_case__) tensor[:, 2].clamp_(min=0 , max=snake_case__) tensor[:, 3].clamp_(min=0 , max=snake_case__)
style_context_codestyle: 83
label: 1
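The heart of the `ResizeShortestEdge` class above is its scale arithmetic: scale so the shorter edge hits the target, then shrink again if the longer edge would exceed `max_size`. The same arithmetic as a standalone sketch:

def shortest_edge_resize_dims(h: int, w: int, size: int, max_size: int) -> tuple[int, int]:
    # Scale so the shorter edge becomes `size`
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # Shrink further if the longer edge would exceed max_size
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

# e.g. a 480x640 image with size=256, max_size=512 -> (256, 341)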
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, 
module_spec=__spec__)
code_codestyle: 83
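Both lazy `__init__` modules above guard real imports behind `TYPE_CHECKING`, so type checkers see every symbol while the interpreter pays no import cost. The same trick in miniature (names illustrative):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only evaluated by static type checkers, never at runtime.
    import torch

def count_params(model: "torch.nn.Module") -> int:
    # The annotation is a string, so torch need not be importable to call this.
    return sum(p.numel() for p in model.parameters())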
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording nodes whose
    subtree has an even number of vertices (candidate cut points)."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    # The root's own even subtree is always recorded, so the answer
    # (maximum removable edges) is len(cuts) - 1.
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
style_context_codestyle: 83
label: 1
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a ( __lowerCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase :Dict = GPTSanJapaneseTokenizer lowerCamelCase :List[Any] = False lowerCamelCase :List[str] = {'''do_clean_text''': False, '''add_prefix_space''': False} def UpperCAmelCase ( self ) -> int: super().setUp() # fmt: off _A = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on _A = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀 _A = {"""unk_token""": """<unk>"""} _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(lowerCAmelCase_ ) ) def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = """こんにちは、世界。 \nこんばんは、㔺界。😀""" _A = """こんにちは、世界。 \nこんばんは、世界。😀""" return input_text, output_text def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: _A , _A = self.get_input_output_texts(lowerCAmelCase_ ) _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _A = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) return text, ids def UpperCAmelCase ( self ) -> str: pass # TODO add if relevant def UpperCAmelCase ( self ) -> Optional[Any]: pass # TODO add if relevant def UpperCAmelCase ( self ) -> Dict: pass # TODO add if relevant def UpperCAmelCase ( self ) -> Any: _A = self.get_tokenizer() # Testing tokenization _A = """こんにちは、世界。 こんばんは、㔺界。""" _A = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""] _A = tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing conversion to ids without special tokens _A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] _A = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing conversion to ids with special tokens _A = tokens + [tokenizer.unk_token] _A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] _A = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.get_tokenizer() # Testing tokenization _A = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。""" _A = """こんにちは、、、、世界。こんばんは、、、、世界。""" _A = tokenizer.encode(lowerCAmelCase_ ) _A = tokenizer.decode(lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def UpperCAmelCase ( self ) -> List[Any]: _A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing 
tokenization _A = """こんにちは、世界。""" _A = """こんばんは、㔺界。😀""" _A = """こんにちは、世界。こんばんは、世界。😀""" _A = tokenizer.encode(prefix_text + input_text ) _A = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) _A = tokenizer.encode(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ ) _A = tokenizer.decode(lowerCAmelCase_ ) _A = tokenizer.decode(lowerCAmelCase_ ) _A = tokenizer.decode(lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def UpperCAmelCase ( self ) -> Tuple: _A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization _A = """こんにちは、世界。""" _A = """こんばんは、㔺界。😀""" _A = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2 _A = len(tokenizer.encode(lowerCAmelCase_ ) ) - 2 _A = [1] + [0] * (len_prefix + len_text + 1) _A = [1] * (len_prefix + len_text + 1) + [0] _A = [1] + [1] * (len_prefix) + [0] * (len_text + 1) _A = tokenizer(prefix_text + input_text ).token_type_ids _A = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids _A = tokenizer(lowerCAmelCase_ , prefix_text=lowerCAmelCase_ ).token_type_ids self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def UpperCAmelCase ( self ) -> List[Any]: _A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) _A = tokenizer.encode("""あンいワ""" ) _A = tokenizer.encode("""""" , prefix_text="""あンいワ""" ) _A = tokenizer.encode("""いワ""" , prefix_text="""あン""" ) self.assertEqual(tokenizer.decode(lowerCAmelCase_ ) , tokenizer.decode(lowerCAmelCase_ ) ) self.assertEqual(tokenizer.decode(lowerCAmelCase_ ) , tokenizer.decode(lowerCAmelCase_ ) ) self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def UpperCAmelCase ( self ) -> str: _A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) _A = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]] _A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _A = tokenizer.batch_encode_plus(lowerCAmelCase_ , padding=lowerCAmelCase_ ) # fmt: off _A = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]] _A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] _A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , lowerCAmelCase_ ) self.assertListEqual(x_token.token_type_ids , lowerCAmelCase_ ) self.assertListEqual(x_token.attention_mask , lowerCAmelCase_ ) self.assertListEqual(x_token_a.input_ids , lowerCAmelCase_ ) self.assertListEqual(x_token_a.token_type_ids , lowerCAmelCase_ ) self.assertListEqual(x_token_a.attention_mask , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> List[Any]: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def UpperCAmelCase ( self ) -> Dict: # tokenizer has no padding token pass
code_codestyle: 83
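The tests above exercise the tokenizer's prefix/input split, where `token_type_ids` mark the bidirectional prefix part. A minimal sketch mirroring them, assuming network access to the `Tanrei/GPTSAN-japanese` checkpoint:

from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
# token_type_ids distinguish the prefix from the text to be continued
enc = tokenizer("いワ", prefix_text="あン")
print(enc.input_ids, enc.token_type_ids)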
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-algorithm for minimum vertex cover: repeatedly pick the
    vertex with the most uncovered edges."""
    queue: list[list] = []

    # For each node, push [-rank, (node, adjacency list)] onto the queue.
    # heapq builds a min priority queue, so -1 * len(value) turns it into
    # a max priority queue keyed on degree.
    for key, value in graph.items():  # O(log(n)) per push
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to the cover
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # If v has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem, remove argmax from elem's
            # adjacency list and update elem's rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
style_context_codestyle: 83
label: 1
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper for union: attach the lower-rank tree under the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it's not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then take each edge whose
        # endpoints still lie in different components
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
code_codestyle: 83
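A small usage sketch for the Kruskal implementation above:

graph = GraphUndirectedWeighted[int]()
for u, v, w in [(1, 2, 1), (2, 3, 2), (1, 3, 10), (3, 4, 5)]:
    graph.add_edge(u, v, w)

mst = graph.kruskal()
# The MST keeps the cheap edges and drops the weight-10 edge between 1 and 3.
print(mst.connections)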
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class PrimeTest(unittest.TestCase):
    def test_primes(self) -> None:
        for n in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29):
            self.assertTrue(is_prime(n))

    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        for n in (2 * 2, 2 * 3, 3 * 3, 3 * 5, 3 * 5 * 7):
            self.assertFalse(is_prime(n))


if __name__ == "__main__":
    unittest.main()
style_context_codestyle: 83
label: 1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :List[Any] = ['''pixel_values'''] def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BICUBIC , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 2_55 , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> None: super().__init__(**lowerCAmelCase_ ) _A = size if size is not None else {"""height""": 2_24, """width""": 2_24} _A = get_size_dict(lowerCAmelCase_ ) _A = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} _A = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name="""crop_size""" ) _A = do_resize _A = do_rescale _A = do_normalize _A = do_center_crop _A = crop_size _A = size _A = resample _A = rescale_factor _A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _A = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray: _A = get_size_dict(lowerCAmelCase_ ) if "shortest_edge" in size: _A = get_resize_output_image_size(lowerCAmelCase_ , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _A = (size["""height"""], size["""width"""]) else: raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray: _A = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(lowerCAmelCase_ , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> np.ndarray: return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray: return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> BatchFeature: _A = do_resize if do_resize is not None else self.do_resize _A = do_rescale if do_rescale is not None else self.do_rescale _A = do_normalize if do_normalize is not None else self.do_normalize _A = do_center_crop if do_center_crop is not None else self.do_center_crop _A = crop_size if crop_size is not None else self.crop_size _A = get_size_dict(lowerCAmelCase_ , param_name="""crop_size""" , default_to_square=lowerCAmelCase_ ) _A = resample if resample is not None else self.resample _A = rescale_factor if rescale_factor is not None else self.rescale_factor _A = image_mean if image_mean is not None else self.image_mean _A = image_std if image_std is not None else self.image_std _A = size if size is not None else self.size _A = get_size_dict(lowerCAmelCase_ ) if not is_batched(lowerCAmelCase_ ): _A = [images] if not valid_images(lowerCAmelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. _A = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _A = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _A = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _A = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _A = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _A = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _A = {"""pixel_values""": images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
code_codestyle: 83
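The class above follows the standard transformers image-processor contract (resize, center-crop, rescale, normalize, then a `pixel_values` BatchFeature). A hypothetical usage sketch, assuming `processor` is an instance of this class:

import numpy as np
from PIL import Image

image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
batch = processor(image, return_tensors="pt")  # __call__ dispatches to preprocess
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default crop_size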
(style_context omitted: verbatim duplicate of the encoder_decoder lazy-import module in the first row)
style_context_codestyle: 83
label: 1
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = 'ybelkada/fonts' def snake_case ( ) -> int: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ''' """Pix2StructImageProcessor. Please upgrade torch.""") def snake_case ( snake_case__ :Dict , snake_case__ :str , snake_case__ :Optional[int]) -> Tuple: requires_backends(snake_case__ , ["""torch"""]) _check_torch_version() _A = image_tensor.unsqueeze(0) _A = torch.nn.functional.unfold(snake_case__ , (patch_height, patch_width) , stride=(patch_height, patch_width)) _A = patches.reshape(image_tensor.size(0) , image_tensor.size(1) , snake_case__ , snake_case__ , -1) _A = patches.permute(0 , 4 , 2 , 3 , 1).reshape( image_tensor.size(2) // patch_height , image_tensor.size(3) // patch_width , image_tensor.size(1) * patch_height * patch_width , ) return patches.unsqueeze(0) def snake_case ( snake_case__ :str , snake_case__ :int = 36 , snake_case__ :str = "black" , snake_case__ :str = "white" , snake_case__ :int = 5 , snake_case__ :int = 5 , snake_case__ :int = 5 , snake_case__ :int = 5 , snake_case__ :Optional[bytes] = None , snake_case__ :Optional[str] = None , ) -> Image.Image: requires_backends(snake_case__ , """vision""") # Add new lines so that each line is no more than 80 characters. _A = textwrap.TextWrapper(width=80) _A = wrapper.wrap(text=snake_case__) _A = """\n""".join(snake_case__) if font_bytes is not None and font_path is None: _A = io.BytesIO(snake_case__) elif font_path is not None: _A = font_path else: _A = hf_hub_download(snake_case__ , """Arial.TTF""") _A = ImageFont.truetype(snake_case__ , encoding="""UTF-8""" , size=snake_case__) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. _A = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , snake_case__)) _A , _A , _A , _A = temp_draw.textbbox((0, 0) , snake_case__ , snake_case__) # Create the actual image with a bit of padding around the text. 
_A = text_width + left_padding + right_padding _A = text_height + top_padding + bottom_padding _A = Image.new("""RGB""" , (image_width, image_height) , snake_case__) _A = ImageDraw.Draw(snake_case__) draw.text(xy=(left_padding, top_padding) , text=snake_case__ , fill=snake_case__ , font=snake_case__) return image def snake_case ( snake_case__ :np.ndarray , snake_case__ :str , **snake_case__ :List[str]) -> Union[str, Any]: requires_backends(snake_case__ , """vision""") # Convert to PIL image if necessary _A = to_pil_image(snake_case__) _A = render_text(snake_case__ , **snake_case__) _A = max(header_image.width , image.width) _A = int(image.height * (new_width / image.width)) _A = int(header_image.height * (new_width / header_image.width)) _A = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""") new_image.paste(header_image.resize((new_width, new_header_height)) , (0, 0)) new_image.paste(image.resize((new_width, new_height)) , (0, new_header_height)) # Convert back to the original framework if necessary _A = to_numpy_array(snake_case__) if infer_channel_dimension_format(snake_case__) == ChannelDimension.LAST: _A = to_channel_dimension_format(snake_case__ , ChannelDimension.LAST) return new_image class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Tuple = ['''flattened_patches'''] def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 20_48 , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> None: super().__init__(**lowerCAmelCase_ ) _A = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} _A = do_normalize _A = do_convert_rgb _A = max_patches _A = is_vqa def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> np.ndarray: requires_backends(self.extract_flattened_patches , """torch""" ) _check_torch_version() # convert to torch _A = to_channel_dimension_format(lowerCAmelCase_ , ChannelDimension.FIRST ) _A = torch.from_numpy(lowerCAmelCase_ ) _A , _A = patch_size["""height"""], patch_size["""width"""] _A , _A = get_image_size(lowerCAmelCase_ ) # maximize scale s.t. _A = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) _A = max(min(math.floor(scale * image_height / patch_height ) , lowerCAmelCase_ ) , 1 ) _A = max(min(math.floor(scale * image_width / patch_width ) , lowerCAmelCase_ ) , 1 ) _A = max(num_feasible_rows * patch_height , 1 ) _A = max(num_feasible_cols * patch_width , 1 ) _A = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=lowerCAmelCase_ , antialias=lowerCAmelCase_ , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] _A = torch_extract_patches(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _A = patches.shape _A = patches_shape[1] _A = patches_shape[2] _A = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] _A = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] _A = torch.arange(lowerCAmelCase_ ).reshape([rows, 1] ).repeat(1 , lowerCAmelCase_ ).reshape([rows * columns, 1] ) _A = torch.arange(lowerCAmelCase_ ).reshape([1, columns] ).repeat(lowerCAmelCase_ , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. 
# [rows * columns, 1] _A = row_ids.to(torch.floataa ) _A = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] _A = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] _A = torch.nn.functional.pad(lowerCAmelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float() _A = to_numpy_array(lowerCAmelCase_ ) return result def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> np.ndarray: if image.dtype == np.uinta: _A = image.astype(np.floataa ) # take mean across the whole `image` _A = np.mean(lowerCAmelCase_ ) _A = np.std(lowerCAmelCase_ ) _A = max(lowerCAmelCase_ , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> ImageInput: _A = do_normalize if do_normalize is not None else self.do_normalize _A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _A = patch_size if patch_size is not None else self.patch_size _A = max_patches if max_patches is not None else self.max_patches _A = self.is_vqa if kwargs.get("""data_format""" , lowerCAmelCase_ ) is not None: raise ValueError("""data_format is not an accepted input as the outputs are """ ) _A = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: _A = [convert_to_rgb(lowerCAmelCase_ ) for image in images] # All transformations expect numpy arrays. _A = [to_numpy_array(lowerCAmelCase_ ) for image in images] if is_vqa: if header_text is None: raise ValueError("""A header text must be provided for VQA models.""" ) _A = kwargs.pop("""font_bytes""" , lowerCAmelCase_ ) _A = kwargs.pop("""font_path""" , lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = [header_text] * len(lowerCAmelCase_ ) _A = [ render_header(lowerCAmelCase_ , header_text[i] , font_bytes=lowerCAmelCase_ , font_path=lowerCAmelCase_ ) for i, image in enumerate(lowerCAmelCase_ ) ] if do_normalize: _A = [self.normalize(image=lowerCAmelCase_ ) for image in images] # convert to torch tensor and permute _A = [ self.extract_flattened_patches(image=lowerCAmelCase_ , max_patches=lowerCAmelCase_ , patch_size=lowerCAmelCase_ ) for image in images ] # create attention mask in numpy _A = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] _A = BatchFeature( data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=lowerCAmelCase_ ) return encoded_outputs
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple: _A , _A = {}, {} if padding is not None: _A = padding if truncation is not None: _A = truncation if top_k is not None: _A = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]: if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = {"""image""": image, """question""": question} else: _A = image _A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) return results def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any: _A = load_image(inputs["""image"""] ) _A = self.tokenizer( inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ ) _A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCAmelCase_ ) return model_inputs def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = self.model(**lowerCAmelCase_ ) return model_outputs def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]: if top_k > self.model.config.num_labels: _A = self.model.config.num_labels if self.framework == "pt": _A = model_outputs.logits.sigmoid()[0] _A , _A = probs.topk(lowerCAmelCase_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _A = scores.tolist() _A = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
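# A minimal sketch of driving the pipeline above through the high-level factory.
# The "dandelin/vilt-b32-finetuned-vqa" checkpoint and the local image path are
# assumptions for illustration.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(image="path/to/image.jpg", question="What is on the table?", top_k=3)
print(preds)  # list of {"score": ..., "answer": ...} dicts, as built in postprocess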
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
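# A minimal sketch of loading one of the pipelines re-exported above; the
# "shi-labs/versatile-diffusion" checkpoint and the CUDA device are assumptions,
# and this is an untested invocation, not a definitive recipe.
import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
image = pipe("an astronaut riding a horse on mars").images[0]
image.save("astronaut.png")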
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]: _A = {} if train_file is not None: _A = [train_file] if eval_file is not None: _A = [eval_file] if test_file is not None: _A = [test_file] _A = datasets.load_dataset("""csv""" , data_files=snake_case__) _A = list(ds[list(files.keys())[0]].features.keys()) _A = features_name.pop(snake_case__) _A = list(set(ds[list(files.keys())[0]][label_name])) _A = {label: i for i, label in enumerate(snake_case__)} _A = tokenizer.model_input_names _A = {} if len(snake_case__) == 1: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , ) elif len(snake_case__) == 2: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST]))) return train_ds, val_ds, test_ds, labelaid _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class a : """simple docstring""" lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} ) lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training 
file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} ) lowerCamelCase :int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCamelCase :bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def snake_case ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) _A , _A , _A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' """ --overwrite_output_dir to overcome.""") # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info( F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, ''' F'''16-bits training: {training_args.fpaa}''') logger.info(F'''Training/evaluation parameters {training_args}''') # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A , _A , _A , _A = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _A = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , ) def compute_metrics(snake_case__ :EvalPrediction) -> Dict: _A = np.argmax(p.predictions , axis=1) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _A = TFTrainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir) # Evaluation _A = {} if training_args.do_eval: logger.info("""*** Evaluate ***""") _A = trainer.evaluate() _A = os.path.join(training_args.output_dir , """eval_results.txt""") with open(snake_case__ , """w""") as writer: logger.info("""***** Eval results *****""") for key, value in result.items(): logger.info(F''' {key} = {value}''') writer.write(F'''{key} = {value}\n''') results.update(snake_case__) return results if __name__ == "__main__": main()
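# A typical invocation of this script, assuming it is saved as
# run_tf_text_classification.py. The flag names mirror the dataclass fields
# parsed by HfArgumentParser above; the CSV files and model name are assumptions.
#
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv \
#     --dev_file dev.csv \
#     --label_column_id 0 \
#     --max_seq_length 128 \
#     --output_dir ./model \
#     --do_train \
#     --do_eval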
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class a ( __lowerCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase :int = AlbertTokenizer lowerCamelCase :Union[str, Any] = AlbertTokenizerFast lowerCamelCase :int = True lowerCamelCase :Any = True lowerCamelCase :List[str] = True def UpperCAmelCase ( self ) -> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing _A = AlbertTokenizer(lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Dict: _A = """this is a test""" _A = """this is a test""" return input_text, output_text def UpperCAmelCase ( self ) -> Optional[int]: _A = """<pad>""" _A = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> List[Any]: _A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(lowerCAmelCase_ ) , 3_00_00 ) def UpperCAmelCase ( self ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def UpperCAmelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = """I was born in 92000, and this is falsé.""" _A = tokenizer.tokenize(lowerCAmelCase_ ) _A = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _A = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _A = self.get_rust_tokenizer() _A = tokenizer.encode(lowerCAmelCase_ ) _A = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = AlbertTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) _A = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [48, 25, 21, 12_89] ) _A = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) _A = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] ) _A = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def UpperCAmelCase ( self ) -> List[Any]: _A = AlbertTokenizer(lowerCAmelCase_ ) _A = tokenizer.encode("""sequence builders""" ) _A = tokenizer.encode("""multi-sequence build""" ) _A = 
tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) _A = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def UpperCAmelCase ( self ) -> Union[str, Any]: # fmt: off _A = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Union[str, Any] = '''speech_to_text''' lowerCamelCase :List[str] = ['''past_key_values'''] lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple: _A = vocab_size _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = max_source_positions _A = max_target_positions _A = num_conv_layers _A = list(lowerCAmelCase_ ) _A = conv_channels _A = input_feat_per_channel _A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
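# A minimal sketch of the consistency check enforced in __init__ above, assuming
# the class is exported as Speech2TextConfig (its model_type is "speech_to_text").
from transformers import Speech2TextConfig

config = Speech2TextConfig()  # defaults: num_conv_layers=2, conv_kernel_sizes=(5, 5)
assert len(config.conv_kernel_sizes) == config.num_conv_layers

try:
    Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))
except ValueError as err:
    print(err)  # mismatch between conv_kernel_sizes and num_conv_layers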
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Dict = '''wav2vec2''' def __init__( self , lowerCAmelCase_=32 , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=30_72 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-5 , lowerCAmelCase_="group" , lowerCAmelCase_="gelu" , lowerCAmelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase_=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase_=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase_=False , lowerCAmelCase_=1_28 , lowerCAmelCase_=16 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0.05 , lowerCAmelCase_=10 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0 , lowerCAmelCase_=10 , lowerCAmelCase_=0 , lowerCAmelCase_=3_20 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1_00 , lowerCAmelCase_=2_56 , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_="sum" , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=2_56 , lowerCAmelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase_=(5, 3, 3, 1, 1) , lowerCAmelCase_=(1, 2, 3, 1, 1) , lowerCAmelCase_=5_12 , lowerCAmelCase_=0 , lowerCAmelCase_=1 , lowerCAmelCase_=2 , lowerCAmelCase_=False , lowerCAmelCase_=3 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Optional[Any]: super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) _A = hidden_size _A = feat_extract_norm _A = feat_extract_activation _A = list(lowerCAmelCase_ ) _A = list(lowerCAmelCase_ ) _A = list(lowerCAmelCase_ ) _A = conv_bias _A = num_conv_pos_embeddings _A = num_conv_pos_embedding_groups _A = len(self.conv_dim ) _A = num_hidden_layers _A = intermediate_size _A = hidden_act _A = num_attention_heads _A = hidden_dropout _A = attention_dropout _A = activation_dropout _A = feat_proj_dropout _A = final_dropout _A = layerdrop _A = layer_norm_eps _A = initializer_range _A = vocab_size _A = do_stable_layer_norm _A = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _A = apply_spec_augment _A = mask_time_prob _A = mask_time_length _A = mask_time_min_masks _A = mask_feature_prob _A = mask_feature_length _A = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _A = num_codevectors_per_group _A = num_codevector_groups _A = contrastive_logits_temperature _A = feat_quantizer_dropout _A = num_negatives _A = codevector_dim _A = proj_codevector_dim _A = diversity_loss_weight # ctc loss _A = ctc_loss_reduction _A = ctc_zero_infinity # adapter _A = add_adapter _A = adapter_kernel_size _A = adapter_stride _A = num_adapter_layers _A = output_hidden_size or hidden_size _A = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _A = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _A = list(lowerCAmelCase_ ) _A = list(lowerCAmelCase_ ) _A = list(lowerCAmelCase_ ) _A = xvector_output_dim @property def UpperCAmelCase ( self ) -> int: return functools.reduce(operator.mul , self.conv_stride , 1 )
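# The property at the end of the class multiplies the conv strides together (in
# transformers it is exposed as `inputs_to_logits_ratio`). With the default
# strides (5, 2, 2, 2, 2, 2, 2) one output frame covers 5 * 2**6 = 320 input
# samples, i.e. 20 ms of 16 kHz audio. Standalone arithmetic check:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)                   # 320
print(ratio / 16_000 * 1_000)  # 20.0 ms of audio per output frame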
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
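# Quick sanity check (not part of the demo above): for f(x) = x**2 on [0, 2]
# the exact area under the curve is 8/3 ~ 2.6667, and the estimate converges
# as the step count grows.
print(trapezoidal_area(lambda x: x**2, 0, 2, 1_000))  # ~2.666668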
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
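# A minimal sketch of the PyTorch classes re-exported above, assuming the public
# "facebook/blenderbot-400M-distill" checkpoint.
from transformers import BlenderbotForConditionalGeneration, BlenderbotTokenizer

name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(name)
model = BlenderbotForConditionalGeneration.from_pretrained(name)

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="pt")
reply_ids = model.generate(**inputs)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))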
import numpy as np
import qiskit


def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bbaa(8, seed=0)}")

    from doctest import testmod

    testmod()
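# Classical illustration of the sifting step above, independent of qiskit: only
# the positions where Alice's and Bob's random bases agree contribute key bits,
# which is why the circuit prepares 6x more qubits than the requested key length.
# The measurement outcomes here are random stand-ins, not simulation results.
import numpy as np

rng = np.random.default_rng(0)
alice_basis = rng.integers(2, size=16)
bob_basis = rng.integers(2, size=16)
bits = rng.integers(2, size=16)  # stand-in for Bob's measurement outcomes

sifted = "".join(str(b) for a_b, b_b, b in zip(alice_basis, bob_basis, bits) if a_b == b_b)
print(len(sifted), "of 16 positions kept:", sifted)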
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class a ( __lowerCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase :Optional[int] = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def UpperCAmelCase ( self , lowerCAmelCase_=0 ) -> Optional[Any]: _A = np.random.RandomState(lowerCAmelCase_ ) _A = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def UpperCAmelCase ( self ) -> Optional[Any]: _A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = self.get_dummy_inputs() _A = pipe(**lowerCAmelCase_ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _A = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> str: _A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = self.get_dummy_inputs() _A = pipe(**lowerCAmelCase_ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _A = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> Union[str, Any]: _A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = self.get_dummy_inputs() _A = pipe(**lowerCAmelCase_ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _A = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> Optional[Any]: _A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = self.get_dummy_inputs() _A = pipe(**lowerCAmelCase_ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _A = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> List[str]: _A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = self.get_dummy_inputs() _A = 
pipe(**lowerCAmelCase_ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _A = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> Tuple: _A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = self.get_dummy_inputs() _A = pipe(**lowerCAmelCase_ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _A = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> Optional[int]: _A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = self.get_dummy_inputs() _A = 3 * [inputs["""prompt"""]] # forward _A = pipe(**lowerCAmelCase_ ) _A = output.images[0, -3:, -3:, -1] _A = self.get_dummy_inputs() _A = 3 * [inputs.pop("""prompt""" )] _A = pipe.tokenizer( lowerCAmelCase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="""np""" , ) _A = text_inputs["""input_ids"""] _A = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] _A = prompt_embeds # forward _A = pipe(**lowerCAmelCase_ ) _A = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def UpperCAmelCase ( self ) -> Optional[int]: _A = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = self.get_dummy_inputs() _A = 3 * ["""this is a negative prompt"""] _A = negative_prompt _A = 3 * [inputs["""prompt"""]] # forward _A = pipe(**lowerCAmelCase_ ) _A = output.images[0, -3:, -3:, -1] _A = self.get_dummy_inputs() _A = 3 * [inputs.pop("""prompt""" )] _A = [] for p in [prompt, negative_prompt]: _A = pipe.tokenizer( lowerCAmelCase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="""np""" , ) _A = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) _A , _A = embeds # forward _A = pipe(**lowerCAmelCase_ ) _A = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class a ( unittest.TestCase ): """simple docstring""" @property def UpperCAmelCase ( self ) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase ( self ) -> List[str]: _A = ort.SessionOptions() _A = False return options def UpperCAmelCase ( self ) -> str: # using the PNDM scheduler by default _A = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = """A painting of a squirrel eating a burger""" np.random.seed(0 ) _A = sd_pipe([prompt] , guidance_scale=6.0 
, num_inference_steps=10 , output_type="""np""" ) _A = output.images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase ( self ) -> Optional[int]: _A = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) _A = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = """open neural network exchange""" _A = np.random.RandomState(0 ) _A = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase_ , output_type="""np""" ) _A = output.images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase ( self ) -> Tuple: _A = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) _A = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = """open neural network exchange""" _A = np.random.RandomState(0 ) _A = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase_ , output_type="""np""" ) _A = output.images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase ( self ) -> int: _A = 0 def test_callback_fn(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: _A = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) _A = latents[0, -3:, -3:, -1] _A = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) _A = latents[0, -3:, -3:, -1] _A = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 _A = False _A = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = """Andromeda galaxy in a bottle""" _A = np.random.RandomState(0 ) pipe( prompt=lowerCAmelCase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def UpperCAmelCase ( self ) -> Optional[Any]: _A = OnnxStableDiffusionPipeline.from_pretrained( 
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) assert pipe.safety_checker is None _A = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase_ ) _A = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None _A = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def snake_case ( snake_case__ :int) -> Optional[int]: return EnvironmentCommand() def snake_case ( snake_case__ :Tuple) -> List[str]: return EnvironmentCommand(args.accelerate_config_file) class a ( __lowerCAmelCase ): """simple docstring""" @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple: _A = parser.add_parser("""env""" ) download_parser.set_defaults(func=lowerCAmelCase_ ) download_parser.add_argument( """--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , ) download_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None: _A = accelerate_config_file def UpperCAmelCase ( self ) -> Dict: _A = """not installed""" if is_safetensors_available(): import safetensors _A = safetensors.__version__ elif importlib.util.find_spec("""safetensors""" ) is not None: import safetensors _A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' _A = """not installed""" _A = _A = """not found""" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file _A = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ): _A = load_config_from_file(self._accelerate_config_file ).to_dict() _A = ( """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else F'''\t{accelerate_config}''' ) _A = """not installed""" _A = """NA""" if is_torch_available(): import torch _A = torch.__version__ _A = torch.cuda.is_available() _A = """not installed""" _A = """NA""" if is_tf_available(): import tensorflow as tf _A = tf.__version__ try: # deprecated in v2.1 _A = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool _A = bool(tf.config.list_physical_devices("""GPU""" ) ) _A = """not installed""" _A = """not installed""" _A = """not installed""" _A = """NA""" if is_flax_available(): import flax import jax import jaxlib _A = flax.__version__ _A = jax.__version__ _A = jaxlib.__version__ _A = jax.lib.xla_bridge.get_backend().platform _A = { """`transformers` version""": version, """Platform""": platform.platform(), """Python version""": platform.python_version(), """Huggingface_hub version""": huggingface_hub.__version__, """Safetensors version""": F'''{safetensors_version}''', """Accelerate version""": F'''{accelerate_version}''', """Accelerate config""": F'''{accelerate_config_str}''', """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''', """Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''', """Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''', """Jax version""": F'''{jax_version}''', """JaxLib version""": F'''{jaxlib_version}''', """Using GPU in script?""": """<fill in>""", """Using distributed or parallel set-up in script?""": """<fill in>""", } print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" ) print(self.format_dict(lowerCAmelCase_ ) ) 
return info @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple: return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
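# Typical invocation, assuming the standard transformers CLI entry point; this
# prints the version table assembled above, ready to paste into a GitHub issue.
#
#   transformers-cli env
#   transformers-cli env --accelerate-config_file path/to/accelerate_config.yaml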
from collections import deque class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: _A = process_name # process name _A = arrival_time # arrival time of the process # completion time of finished process or last interrupted time _A = arrival_time _A = burst_time # remaining burst time _A = 0 # total time of the process wait in ready queue _A = 0 # time from arrival time to completion time class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> None: # total number of mlfq's queues _A = number_of_queues # time slice of queues that round robin algorithm applied _A = time_slices # unfinished process is in this ready_queue _A = queue # current time _A = current_time # finished process is in this sequence queue _A = deque() def UpperCAmelCase ( self ) -> list[str]: _A = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[int]: _A = [] for i in range(len(lowerCAmelCase_ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[int]: _A = [] for i in range(len(lowerCAmelCase_ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[int]: _A = [] for i in range(len(lowerCAmelCase_ ) ): completion_times.append(queue[i].stop_time ) return completion_times def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[int]: return [q.burst_time for q in queue] def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def UpperCAmelCase ( self , lowerCAmelCase_ ) -> deque[Process]: _A = deque() # sequence deque of finished process while len(lowerCAmelCase_ ) != 0: _A = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(lowerCAmelCase_ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 _A = 0 # set the process's turnaround time because it is finished _A = self.current_time - cp.arrival_time # set the completion time _A = self.current_time # add the process to queue that has finished queue finished.append(lowerCAmelCase_ ) self.finish_queue.extend(lowerCAmelCase_ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[deque[Process], deque[Process]]: _A = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(lowerCAmelCase_ ) ): _A = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(lowerCAmelCase_ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time _A = self.current_time # locate the process 
behind the queue because it is not finished ready_queue.append(lowerCAmelCase_ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished _A = 0 # set the finish time _A = self.current_time # update the process' turnaround time because it is finished _A = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(lowerCAmelCase_ ) self.finish_queue.extend(lowerCAmelCase_ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def UpperCAmelCase ( self ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): _A , _A = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest _SCREAMING_SNAKE_CASE = Process('P1', 0, 53) _SCREAMING_SNAKE_CASE = Process('P2', 0, 17) _SCREAMING_SNAKE_CASE = Process('P3', 0, 68) _SCREAMING_SNAKE_CASE = Process('P4', 0, 24) _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = [17, 25] _SCREAMING_SNAKE_CASE = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) _SCREAMING_SNAKE_CASE = Process('P1', 0, 53) _SCREAMING_SNAKE_CASE = Process('P2', 0, 17) _SCREAMING_SNAKE_CASE = Process('P3', 0, 68) _SCREAMING_SNAKE_CASE = Process('P4', 0, 24) _SCREAMING_SNAKE_CASE = 3 _SCREAMING_SNAKE_CASE = [17, 25] _SCREAMING_SNAKE_CASE = deque([Pa, Pa, Pa, Pa]) _SCREAMING_SNAKE_CASE = MLFQ(number_of_queues, time_slices, queue, 0) _SCREAMING_SNAKE_CASE = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( F'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( F'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['PLBartTokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST', 'PLBartForCausalLM', 'PLBartForConditionalGeneration', 'PLBartForSequenceClassification', 'PLBartModel', 'PLBartPreTrainedModel', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure)
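# A minimal sketch of the PLBart classes re-exported above, assuming the public
# "uclanlp/plbart-base" checkpoint and Java source/target languages; it computes
# a denoising loss on a toy code snippet and is not a tested recipe.
from transformers import PLBartForConditionalGeneration, PLBartTokenizer

name = "uclanlp/plbart-base"
tokenizer = PLBartTokenizer.from_pretrained(name, src_lang="java", tgt_lang="java")
model = PLBartForConditionalGeneration.from_pretrained(name)

inputs = tokenizer("public int maximum(int a, int b) { return Math.max(a, b); }", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])
print(float(outputs.loss))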
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' _SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' _SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]: _A = {doc: key_lines} _A = {doc: sys_lines} _A = {} _A = 0 _A = 0 _A = 0 _A = 0 _A = 0 _A = 0 _A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__) key_singletons_num += singletons_num if NP_only or min_span: _A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__) _A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__) sys_singletons_num += singletons_num if NP_only or min_span: _A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__) if remove_nested: _A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _A = reader.get_mention_assignments(snake_case__ , snake_case__) _A = reader.get_mention_assignments(snake_case__ , snake_case__) _A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''') logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''') if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' """files, respectively""") return doc_coref_infos def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int: _A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , 
snake_case__ , snake_case__) _A = {} _A = 0 _A = 0 for name, metric in metrics: _A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa}) logger.info( name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _A = (conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''') output_scores.update({"""conll_score""": conll}) return output_scores def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]: _A = False for line in key_lines: if not line.startswith("""#"""): if len(line.split()) > 6: _A = line.split()[5] if not parse_col == "-": _A = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): """simple docstring""" def UpperCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]: _A = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _A = util.check_gold_parse_annotation(lowerCAmelCase_ ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _A = evaluate( key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , ) return score
83
1
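For reference, the `conll_score` reported by the metric above is simply the mean of the MUC, B-cubed and CEAFe F1 values scaled to a percentage, as the evaluate function shows. A one-line restatement:

# The averaged CoNLL score, as computed in the evaluate function above.
def conll_score(f1_muc: float, f1_bcub: float, f1_ceafe: float) -> float:
    return (f1_muc + f1_bcub + f1_ceafe) / 3 * 100

assert conll_score(1.0, 1.0, 1.0) == 100.0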
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class a ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Dict: _A = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> Tuple: _A = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> Optional[Any]: _A = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> Any: _A = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> int: _A = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", # Removed: 'text_encoder/model.safetensors', """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> List[Any]: _A = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _A = """fp16""" self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> Optional[Any]: _A = [ """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _A = """fp16""" self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> Any: # pass variant but use the non-variant filenames _A = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] _A = """fp16""" self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> Dict: _A = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _A = """fp16""" self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> Tuple: _A = [ """text_encoder/pytorch_model.fp16.bin""", 
"""text_encoder/model.fp16.safetensors""", ] _A = """fp16""" self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> List[Any]: # pass variant but use the non-variant filenames _A = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] _A = """fp16""" self.assertTrue(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) ) def UpperCAmelCase ( self ) -> List[str]: _A = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", # 'text_encoder/model.fp16.safetensors', """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _A = """fp16""" self.assertFalse(is_safetensors_compatible(lowerCAmelCase_ , variant=lowerCAmelCase_ ) )
83
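The test cases above pin down the compatibility rule: every PyTorch `.bin` weight needs a `.safetensors` counterpart, with transformers-style `pytorch_model` basenames mapping to `model`. A hedged re-implementation sketch of that rule (not the diffusers source):

# Sketch of the rule the tests above encode; illustration, not diffusers code.
def sketch_is_safetensors_compatible(filenames, variant=None):
    v = f".{variant}" if variant is not None else ""
    names = set(filenames)
    for name in names:
        if not name.endswith(f"{v}.bin"):
            continue
        folder, _, base = name.rpartition("/")
        stem = base[: -len(f"{v}.bin")]
        if stem == "pytorch_model":
            stem = "model"  # transformers checkpoints name their safetensors file differently
        prefix = f"{folder}/" if folder else ""
        if f"{prefix}{stem}{v}.safetensors" not in names:
            return False
    return True

assert sketch_is_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
assert not sketch_is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])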
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } _SCREAMING_SNAKE_CASE = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } _SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512} def snake_case ( snake_case__ :Tuple) -> str: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char)) _A = char _A = set(snake_case__) return pairs class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :List[Any] = VOCAB_FILES_NAMES lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase :int = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int: super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ ) with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle: _A = json.load(lowerCAmelCase_ ) _A = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: _A = merges_handle.read().split("""\n""" )[1:-1] _A = [tuple(merge.split() ) for merge in merges] _A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _A = {} @property def UpperCAmelCase ( self ) -> int: return len(self.encoder ) def UpperCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: if token in self.cache: return self.cache[token] _A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ ) _A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ ) _A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ ) if "\n" in token: _A = token.replace("""\n""" , """ __newln__""" ) _A = token.split(""" """ ) _A = [] for token in tokens: if not len(lowerCAmelCase_ ): continue _A = token.lower() _A = tuple(lowerCAmelCase_ ) _A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) _A = get_pairs(lowerCAmelCase_ ) if not pairs: words.append(lowerCAmelCase_ ) continue while True: _A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(lowerCAmelCase_ ): try: _A = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) new_word.extend(word[i:j] ) _A = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(lowerCAmelCase_ ) _A = new_word if len(lowerCAmelCase_ ) == 1: break else: _A = get_pairs(lowerCAmelCase_ ) _A = """@@ """.join(lowerCAmelCase_ ) _A = 
word[:-4] _A = word words.append(lowerCAmelCase_ ) return " ".join(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _A = [] _A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: _A = token.lower() return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: _A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" ) _A = 0 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) _A = token_index writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file
83
1
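A quick illustration of the `get_pairs` helper that drives the BPE merge loop in the tokenizer above: it collects the adjacent symbol bigrams of a word.

# get_pairs("hello") yields the adjacent character bigrams; the BPE loop then
# repeatedly merges the lowest-ranked pair until no ranked pair remains.
word = tuple("hello")
pairs = set(zip(word, word[1:]))  # equivalent to get_pairs(word) above
assert pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}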
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class a ( __lowerCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase :List[Any] = CanineTokenizer lowerCamelCase :Tuple = False def UpperCAmelCase ( self ) -> Dict: super().setUp() _A = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase ( self ) -> Union[str, Any]: return CanineTokenizer.from_pretrained("""google/canine-s""" ) def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> CanineTokenizer: _A = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) _A = 10_24 return tokenizer @require_torch def UpperCAmelCase ( self ) -> int: _A = self.canine_tokenizer _A = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off _A = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on _A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""pt""" ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) _A = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def UpperCAmelCase ( self ) -> Optional[int]: _A = self.canine_tokenizer _A = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] _A = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""pt""" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , lowerCAmelCase_ ) self.assertIn("""attention_mask""" , lowerCAmelCase_ ) self.assertIn("""token_type_ids""" , lowerCAmelCase_ ) @require_torch def UpperCAmelCase ( self ) -> Any: _A = self.canine_tokenizer _A = [ """What's the weater?""", """It's about 25 degrees.""", ] _A = tokenizer( text_target=lowerCAmelCase_ , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase_ , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def UpperCAmelCase ( self ) -> int: # safety check on max_len default value so we are sure the test works _A = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _A = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _A = tempfile.mkdtemp() _A = """ He is very happy, UNwant\u00E9d,running""" _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) tokenizer.save_pretrained(lowerCAmelCase_ ) _A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ ) _A = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) shutil.rmtree(lowerCAmelCase_ ) _A = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with 
self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc _A = tempfile.mkdtemp() _A = """ He is very happy, UNwant\u00E9d,running""" _A = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: _A = chr(0xe_007 ) additional_special_tokens.append(lowerCAmelCase_ ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) tokenizer.save_pretrained(lowerCAmelCase_ ) _A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ ) _A = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertIn(lowerCAmelCase_ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _A = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> List[str]: _A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A , _A = self.get_clean_sequence(lowerCAmelCase_ ) # a special token for Canine can be defined as follows: _A = 0xe_005 _A = chr(lowerCAmelCase_ ) tokenizer.add_special_tokens({"""cls_token""": special_token} ) _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , 1 ) _A = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase_ ) _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , input_encoded + special_token_id ) _A = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) self.assertTrue(special_token not in decoded ) def UpperCAmelCase ( self ) -> int: _A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = chr(0xe_005 ) _A = chr(0xe_006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase_ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} ) _A = tokenizer.tokenize(lowerCAmelCase_ ) _A = tokenizer.tokenize(lowerCAmelCase_ ) self.assertEqual(len(lowerCAmelCase_ ) , 1 ) self.assertEqual(len(lowerCAmelCase_ ) , 1 ) self.assertEqual(token_a[0] , lowerCAmelCase_ ) self.assertEqual(token_a[0] , lowerCAmelCase_ ) @require_tokenizers def UpperCAmelCase ( self ) -> Any: _A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: _A = 0xe_006 _A = chr(lowerCAmelCase_ ) _A = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowerCAmelCase_ ) tokenizer.from_pretrained(lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> List[str]: _A = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCAmelCase_ ) with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: _A = json.load(lowerCAmelCase_ ) with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: _A = json.load(lowerCAmelCase_ ) # a special token for Canine can be defined as follows: _A = 0xe_006 _A = chr(lowerCAmelCase_ ) _A = [new_token_a] _A = [new_token_a] with open(os.path.join(lowerCAmelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _A = tokenizer_class.from_pretrained(lowerCAmelCase_ , extra_ids=0 ) self.assertIn(lowerCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) _A = 0xe_007 _A = chr(lowerCAmelCase_ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _A = [AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )] _A = tokenizer_class.from_pretrained( lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , extra_ids=0 ) self.assertIn(lowerCAmelCase_ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def UpperCAmelCase ( self ) -> Optional[Any]: _A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = """hello world""" if self.space_between_special_tokens: _A = """[CLS] hello world [SEP]""" else: _A = input _A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _A = tokenizer.decode(lowerCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(lowerCAmelCase_ , [output, output.lower()] ) def UpperCAmelCase ( self ) -> Union[str, Any]: _A = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _A = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] _A = """a""" _A = ord(lowerCAmelCase_ ) for attr in attributes_list: setattr(lowerCAmelCase_ , attr + """_id""" , lowerCAmelCase_ ) self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(getattr(lowerCAmelCase_ , attr + """_id""" ) , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , attr + """_id""" , lowerCAmelCase_ ) self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(getattr(lowerCAmelCase_ , attr + """_id""" ) , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens_ids""" ) , [] ) _A = 0xe_006 _A = chr(lowerCAmelCase_ ) setattr(lowerCAmelCase_ , """additional_special_tokens_ids""" , [additional_special_token_id] ) self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens""" ) , [additional_special_token] ) self.assertListEqual(getattr(lowerCAmelCase_ , """additional_special_tokens_ids""" ) , [additional_special_token_id] ) def UpperCAmelCase ( self ) -> List[str]: pass def UpperCAmelCase ( self ) -> Tuple: pass def UpperCAmelCase ( self ) -> Union[str, Any]: pass def UpperCAmelCase ( self ) -> Optional[Any]: pass def UpperCAmelCase ( self ) -> Any: pass def UpperCAmelCase ( self ) -> Optional[Any]: pass def UpperCAmelCase ( self ) -> List[Any]: pass def UpperCAmelCase ( self ) -> 
Optional[Any]: pass
83
# fmt: off
MORSE_CODE_DICT = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
    'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
    'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
    'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
    'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--',
    '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..',
    '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.', ':': '---...',
    ',': '--..--', '.': '.-.-.-', "'": '.----.', '"': '.-..-.', '?': '..--..',
    '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-', '(': '-.--.',
    ')': '-.--.-', '!': '-.-.--', ' ': '/'
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}

def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())

def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())

def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)

if __name__ == "__main__":
    main()
83
1
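A round-trip sanity check for the Morse module above:

# encrypt joins codes with spaces; decrypt splits on whitespace and inverts.
assert encrypt("SOS") == "... --- ..."
assert decrypt("... --- ...") == "SOS"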
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple: _A , _A = {}, {} if padding is not None: _A = padding if truncation is not None: _A = truncation if top_k is not None: _A = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]: if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = {"""image""": image, """question""": question} else: _A = image _A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) return results def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any: _A = load_image(inputs["""image"""] ) _A = self.tokenizer( inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ ) _A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCAmelCase_ ) return model_inputs def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = self.model(**lowerCAmelCase_ ) return model_outputs def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]: if top_k > self.model.config.num_labels: _A = self.model.config.num_labels if self.framework == "pt": _A = model_outputs.logits.sigmoid()[0] _A , _A = probs.topk(lowerCAmelCase_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _A = scores.tolist() _A = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
83
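A hedged usage sketch for the visual-question-answering pipeline above; the checkpoint and image URL are illustrative public examples (network access required), and the output keys match the `postprocess` step above.

# Usage sketch; assumes the standard transformers pipeline entry point.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",
    question="How many cats are there?",
)
print(preds[0]["answer"], preds[0]["score"])  # list of {"score", "answer"} dicts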
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { 'configuration_jukebox': [ 'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'JukeboxConfig', 'JukeboxPriorConfig', 'JukeboxVQVAEConfig', ], 'tokenization_jukebox': ['JukeboxTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'JukeboxModel', 'JukeboxPreTrainedModel', 'JukeboxVQVAE', 'JukeboxPrior', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
83
1
from collections.abc import Iterator

def triangle_number_generator() -> Iterator[int]:
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2

def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count

def solution() -> int:
    # Project Euler 12: first triangle number with more than 500 divisors.
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)

if __name__ == "__main__":
    print(solution())
83
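Quick sanity checks for `count_divisors` above: 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors.

assert count_divisors(28) == 6
assert count_divisors(1) == 1  # the factor loop never runs; base count is 1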
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum''' lowerCamelCase :Tuple = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) lowerCamelCase :List[Any] = '''summarizer''' lowerCamelCase :List[str] = AutoTokenizer lowerCamelCase :Dict = AutoModelForSeqaSeqLM lowerCamelCase :int = ['''text'''] lowerCamelCase :List[Any] = ['''text'''] def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: return self.model.generate(**lowerCAmelCase_ )[0] def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
83
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Union[str, Any] = '''speech_to_text''' lowerCamelCase :List[str] = ['''past_key_values'''] lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple: _A = vocab_size _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = max_source_positions _A = max_target_positions _A = num_conv_layers _A = list(lowerCAmelCase_ ) _A = conv_channels _A = input_feat_per_channel _A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
83
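A small usage sketch for the config above (assuming the standard `Speech2TextConfig` entry point): the constructor validates that `conv_kernel_sizes` has exactly `num_conv_layers` entries.

# Sketch; the class above corresponds to transformers' Speech2TextConfig.
from transformers import Speech2TextConfig

config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # consistent
try:
    Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))  # mismatched
except ValueError as err:
    print("rejected as expected:", err)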
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _SCREAMING_SNAKE_CASE = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def snake_case ( snake_case__ :Union[str, Any]) -> Dict: _A = torch.load(snake_case__ , map_location="""cpu""") return sd def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]: _A = OrderedDict() _A = torch.arange(config.max_position_embeddings).expand((1, -1)) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _A = key for name_pair in rename_keys_prefix: _A = new_key.replace(name_pair[0] , name_pair[1]) _A = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _A = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int: assert ( checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: _A = """pretraining""" if "vcr" in checkpoint_path: _A = {"""visual_embedding_dim""": 512} elif "vqa_advanced" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} elif "vqa" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} elif "nlvr" in checkpoint_path: _A = {"""visual_embedding_dim""": 1_024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''') else: if "vcr" in checkpoint_path: _A = {"""visual_embedding_dim""": 512} _A = """multichoice""" elif "vqa_advanced" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} _A = """vqa_advanced""" elif "vqa" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129} _A = """vqa""" elif "nlvr" in checkpoint_path: _A = { """visual_embedding_dim""": 1_024, """num_labels""": 2, } _A = """nlvr""" _A = VisualBertConfig(**snake_case__) # Load State Dict _A = load_state_dict(snake_case__) _A = get_new_dict(snake_case__ , snake_case__) if model_type == "pretraining": _A = VisualBertForPreTraining(snake_case__) elif model_type == "vqa": _A = VisualBertForQuestionAnswering(snake_case__) elif model_type == "nlvr": _A = VisualBertForVisualReasoning(snake_case__) elif model_type == "multichoice": _A = VisualBertForMultipleChoice(snake_case__) model.load_state_dict(snake_case__) # Save Checkpoints Path(snake_case__).mkdir(exist_ok=snake_case__) model.save_pretrained(snake_case__) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', 
type=str, help='Path to the output PyTorch model.') _SCREAMING_SNAKE_CASE = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
83
1
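A toy illustration of the prefix-renaming pass in the conversion script above: each (old, new) pair is substituted into every state-dict key.

from collections import OrderedDict

rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
old_sd = OrderedDict({"bert.bert.encoder.weight": 0, "bert.cls.bias": 1})
new_sd = OrderedDict()
for key, value in old_sd.items():
    for old, new in rename_pairs:
        key = key.replace(old, new)  # apply every rename pair in order
    new_sd[key] = value
assert list(new_sd) == ["visual_bert.encoder.weight", "cls.bias"]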
from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class a ( __lowerCAmelCase ): """simple docstring""" def UpperCAmelCase ( self ) -> List[str]: return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def UpperCAmelCase ( self ) -> Optional[int]: _A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]} return Dataset.from_dict(lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self._create_example_records() _A = Dataset.from_list(lowerCAmelCase_ ) self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] ) for i, r in enumerate(lowerCAmelCase_ ): self.assertDictEqual(lowerCAmelCase_ , example_records[i] ) def UpperCAmelCase ( self ) -> str: _A = self._create_example_records() _A = Dataset.from_list(lowerCAmelCase_ ) _A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns _A = [{"""col_1""": 1}, {"""col_2""": """x"""}] _A = Dataset.from_list(lowerCAmelCase_ ) self.assertDictEqual(dset[0] , {"""col_1""": 1} ) self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record _A = [{"""col_1""": []}, {"""col_1""": [1, 2]}] _A = Dataset.from_list(lowerCAmelCase_ ) self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) ) def UpperCAmelCase ( self ) -> Any: _A = Dataset.from_list([] ) self.assertEqual(len(lowerCAmelCase_ ) , 0 ) self.assertListEqual(dset.column_names , [] )
83
1
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> Optional[Any]: super().__init__( lowerCAmelCase_ , split=lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , streaming=lowerCAmelCase_ , num_proc=lowerCAmelCase_ , **lowerCAmelCase_ , ) _A = field _A = path_or_paths if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else {self.split: path_or_paths} _A = Json( cache_dir=lowerCAmelCase_ , data_files=lowerCAmelCase_ , features=lowerCAmelCase_ , field=lowerCAmelCase_ , **lowerCAmelCase_ , ) def UpperCAmelCase ( self ) -> int: # Build iterable dataset if self.streaming: _A = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _A = None _A = None _A = None _A = None self.builder.download_and_prepare( download_config=lowerCAmelCase_ , download_mode=lowerCAmelCase_ , verification_mode=lowerCAmelCase_ , base_path=lowerCAmelCase_ , num_proc=self.num_proc , ) _A = self.builder.as_dataset( split=self.split , verification_mode=lowerCAmelCase_ , in_memory=self.keep_in_memory ) return dataset class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> List[Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A = dataset _A = path_or_buf _A = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A = num_proc _A = """utf-8""" _A = to_json_kwargs def UpperCAmelCase ( self ) -> int: _A = self.to_json_kwargs.pop("""path_or_buf""" , lowerCAmelCase_ ) _A = self.to_json_kwargs.pop("""orient""" , """records""" ) _A = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False ) _A = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True ) _A = self.to_json_kwargs.pop("""compression""" , lowerCAmelCase_ ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , """wb""" , compression=lowerCAmelCase_ ) as buffer: _A = self._write(file_obj=lowerCAmelCase_ , orient=lowerCAmelCase_ , lines=lowerCAmelCase_ , index=lowerCAmelCase_ , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F'''The compression parameter is not supported when writing to a buffer, but compression={compression}''' """ was passed. 
Please provide a local path instead.""" ) _A = self._write( file_obj=self.path_or_buf , orient=lowerCAmelCase_ , lines=lowerCAmelCase_ , index=lowerCAmelCase_ , **self.to_json_kwargs ) return written def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: _A , _A , _A , _A , _A = args _A = query_table( table=self.dataset.data , key=slice(lowerCAmelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , ) _A = batch.to_pandas().to_json( path_or_buf=lowerCAmelCase_ , orient=lowerCAmelCase_ , lines=lowerCAmelCase_ , index=lowerCAmelCase_ , **lowerCAmelCase_ ) if not json_str.endswith("""\n""" ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ , ) -> int: _A = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): _A = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(lowerCAmelCase_ ) else: _A , _A = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCAmelCase_ , lowerCAmelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): written += file_obj.write(lowerCAmelCase_ ) return written
83
def solution(limit: int = 1_000_000) -> int:
    # Project Euler 72: sum of Euler's totient phi(n) for 2 <= n <= limit.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))

if __name__ == "__main__":
    print(f"{solution() = }")
83
1
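A sanity check for the fixed totient sum above: for limit = 10, the sum of phi(n) over 2 <= n <= 10 is 1+2+2+4+2+6+4+6+4 = 31 (the float products land slightly above the exact values here, so the final int() truncation still yields 31).

assert solution(10) == 31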
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path

# check whether the graph has an Euler circuit (1), an Euler path (2), or neither (3)
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node

def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)

def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []  # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)

if __name__ == "__main__":
    main()
83
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = relative_attention _A = position_biased_input _A = pos_att_type _A = scope def UpperCAmelCase ( self ) -> Dict: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self ) -> Optional[int]: return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _A = DebertaVaModel(config=lowerCAmelCase_ ) 
model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _A = DebertaVaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _A = self.num_labels _A = DebertaVaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = self.num_labels _A = DebertaVaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: _A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( __lowerCAmelCase , __lowerCAmelCase 
, unittest.TestCase ): """simple docstring""" lowerCamelCase :int = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase :str = ( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase :str = True lowerCamelCase :Union[str, Any] = False lowerCamelCase :Optional[int] = False lowerCamelCase :List[str] = False lowerCamelCase :str = False def UpperCAmelCase ( self ) -> Optional[int]: _A = DebertaVaModelTester(self ) _A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> List[str]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> int: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ ) @slow def UpperCAmelCase ( self ) -> Any: for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DebertaVaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase ( self ) -> int: pass @slow def UpperCAmelCase ( self ) -> Optional[Any]: _A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" ) _A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) _A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. _A = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
83
1
import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _SCREAMING_SNAKE_CASE = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) _SCREAMING_SNAKE_CASE = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def snake_case ( snake_case__ :List[Any] , snake_case__ :Optional[int] , snake_case__ :Optional[int]) -> int: _A = SavedModel() _A = [] with open(os.path.join(snake_case__ , """utils""" , """tf_ops""" , """onnx.json""")) as f: _A = json.load(snake_case__)["""opsets"""] for i in range(1 , opset + 1): onnx_ops.extend(onnx_opsets[str(snake_case__)]) with open(snake_case__ , """rb""") as f: saved_model.ParseFromString(f.read()) _A = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def) # Convert to list, sorted if you want _A = sorted(snake_case__) _A = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(snake_case__) if strict and len(snake_case__) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops) elif len(snake_case__) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''') print(*snake_case__ , sep="""\n""") else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''') if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) _SCREAMING_SNAKE_CASE = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
83
def snake_case ( snake_case__ :int , snake_case__ :int) -> int: return int(input_a == input_a == 0) def snake_case ( ) -> None: print("""Truth Table of NOR Gate:""") print("""| Input 1 | Input 2 | Output |""") print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''') print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''') print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''') print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''') if __name__ == "__main__": import doctest doctest.testmod() main()
83
1
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def snake_case ( snake_case__ :Any) -> str: _A = filter(lambda snake_case__: p.requires_grad , model.parameters()) _A = sum([np.prod(p.size()) for p in model_parameters]) return params _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) def snake_case ( snake_case__ :str , snake_case__ :Any) -> Optional[int]: if metric == "rouge2": _A = """{val_avg_rouge2:.4f}-{step_count}""" elif metric == "bleu": _A = """{val_avg_bleu:.4f}-{step_count}""" elif metric == "em": _A = """{val_avg_em:.4f}-{step_count}""" elif metric == "loss": _A = """{val_avg_loss:.4f}-{step_count}""" else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' """ function.""") _A = ModelCheckpoint( dirpath=snake_case__ , filename=snake_case__ , monitor=F'''val_{metric}''' , mode="""max""" , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[int]) -> Dict: return EarlyStopping( monitor=F'''val_{metric}''' , mode="""min""" if """loss""" in metric else """max""" , patience=snake_case__ , verbose=snake_case__ , ) class a ( pl.Callback ): """simple docstring""" def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _A = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowerCAmelCase_ ) @rank_zero_only def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> None: logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) _A = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} ) # Log results _A = Path(pl_module.hparams.output_dir ) if type_path == "test": _A = od / """test_results.txt""" _A = od / """test_generations.txt""" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_A = od / F'''{type_path}_results/{trainer.global_step:05d}.txt''' _A = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=lowerCAmelCase_ ) generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ ) with open(lowerCAmelCase_ , """a+""" ) as writer: for key in sorted(lowerCAmelCase_ ): if key in ["log", "progress_bar", "preds"]: continue _A = metrics[key] if isinstance(lowerCAmelCase_ , torch.Tensor ): _A = val.item() _A = F'''{key}: {val:.6f}\n''' writer.write(lowerCAmelCase_ ) if not save_generations: return if "preds" in metrics: _A = """\n""".join(metrics["""preds"""] ) generations_file.open("""w+""" ).write(lowerCAmelCase_ ) @rank_zero_only def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: try: _A = pl_module.model.model.num_parameters() except AttributeError: _A = pl_module.model.num_parameters() _A = count_trainable_parameters(lowerCAmelCase_ ) # mp stands for million parameters trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} ) @rank_zero_only def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , """test""" ) @rank_zero_only def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
83
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str: _A = """bilinear""" _A = max_size _A = short_edge_length def __call__( self , lowerCAmelCase_ ) -> Optional[Any]: _A = [] for img in imgs: _A , _A = img.shape[:2] # later: provide list and randomly choose index for resize _A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img _A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ ) if h < w: _A , _A = size, scale * w else: _A , _A = scale * h, size if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size: _A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ ) _A = newh * scale _A = neww * scale _A = int(neww + 0.5 ) _A = int(newh + 0.5 ) if img.dtype == np.uinta: _A = Image.fromarray(lowerCAmelCase_ ) _A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) _A = np.asarray(lowerCAmelCase_ ) else: _A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw _A = nn.functional.interpolate( lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 ) img_augs.append(lowerCAmelCase_ ) return img_augs class a : """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> List[Any]: _A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) _A = cfg.INPUT.FORMAT _A = cfg.SIZE_DIVISIBILITY _A = cfg.PAD_VALUE _A = cfg.INPUT.MAX_SIZE_TEST _A = cfg.MODEL.DEVICE _A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) ) _A = [im.shape[-2:] for im in images] _A = [ nn.functional.pad( lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ] return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int: with torch.no_grad(): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = [images] if single_image: assert len(lowerCAmelCase_ ) == 1 for i in range(len(lowerCAmelCase_ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge _A = torch.tensor([im.shape[:2] for im in images] ) _A = self.aug(lowerCAmelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic _A = [self.normalizer(lowerCAmelCase_ ) for x in images] # now pad them to do the following operations _A , _A = self.pad(lowerCAmelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad _A = 
torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]: assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!" _A , _A = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__) tensor[:, 1].clamp_(min=0 , max=snake_case__) tensor[:, 2].clamp_(min=0 , max=snake_case__) tensor[:, 3].clamp_(min=0 , max=snake_case__)
83
1
from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def snake_case ( snake_case__ :int) -> int: _A = prime_factors(snake_case__) if is_square_free(snake_case__): return -1 if len(snake_case__) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
83
from collections import defaultdict def snake_case ( snake_case__ :int) -> int: _A = 1 _A = True for v in tree[start]: if v not in visited: ret += dfs(snake_case__) if ret % 2 == 0: cuts.append(snake_case__) return ret def snake_case ( ) -> Any: dfs(1) if __name__ == "__main__": _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10, 9 _SCREAMING_SNAKE_CASE = defaultdict(list) _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
83
1
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow _SCREAMING_SNAKE_CASE = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class a ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Tuple: _A = [file for file in os.listdir(lowerCAmelCase_ ) if os.path.isfile(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )] if identifier is not None: _A = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): for n_ in n_identifier: _A = [file for file in files if n_ not in file] else: _A = [file for file in files if n_identifier not in file] _A = ignore_files or [] ignore_files.append("""__init__.py""" ) _A = [file for file in files if file not in ignore_files] for file in files: # Open all files print("""Testing""" , lowerCAmelCase_ ) if only_modules: _A = file.split(""".""" )[0] try: _A = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) _A = doctest.DocTestSuite(lowerCAmelCase_ ) _A = unittest.TextTestRunner().run(lowerCAmelCase_ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'''{module_identifier} is not a module.''' ) else: _A = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCAmelCase ( self ) -> Any: _A = Path("""src/transformers""" ) _A = """modeling""" _A = [ """modeling_ctrl.py""", """modeling_tf_ctrl.py""", ] self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ , ignore_files=lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> List[Any]: _A = Path("""src/transformers""" ) _A = """tokenization""" self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: _A = Path("""src/transformers""" ) _A = """configuration""" self.analyze_directory(lowerCAmelCase_ , identifier=lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Any: _A = Path("""src/transformers""" ) _A = ["""configuration""", """modeling""", """tokenization"""] self.analyze_directory(lowerCAmelCase_ , n_identifier=lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Union[str, Any]: _A = Path("""docs/source""" ) _A = ["""favicon.ico"""] self.analyze_directory(lowerCAmelCase_ , ignore_files=lowerCAmelCase_ , only_modules=lowerCAmelCase_ )
83
import heapq def snake_case ( snake_case__ :dict) -> set[int]: _A = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(snake_case__ , [-1 * len(snake_case__), (key, value)]) # chosen_vertices = set of chosen vertices _A = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices _A = heapq.heappop(snake_case__)[1][0] chosen_vertices.add(snake_case__) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: _A = elem[1][1].index(snake_case__) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(snake_case__) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() _SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
83
1
def snake_case ( snake_case__ :int , snake_case__ :int) -> int: return int(input_a == input_a == 0) def snake_case ( ) -> None: print("""Truth Table of NOR Gate:""") print("""| Input 1 | Input 2 | Output |""") print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''') print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''') print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''') print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''') if __name__ == "__main__": import doctest doctest.testmod() main()
83
import math import unittest def snake_case ( snake_case__ :int) -> bool: assert isinstance(snake_case__ , snake_case__) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True class a ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> List[Any]: self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def UpperCAmelCase ( self ) -> Dict: with self.assertRaises(lowerCAmelCase_ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , ) self.assertFalse( is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
83
1
import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class a : """simple docstring""" lowerCamelCase :Optional[Union[str, Path]] = None lowerCamelCase :bool = False lowerCamelCase :bool = False lowerCamelCase :bool = False lowerCamelCase :Optional[Dict] = None lowerCamelCase :Optional[str] = None lowerCamelCase :bool = False lowerCamelCase :bool = False lowerCamelCase :bool = False lowerCamelCase :bool = True lowerCamelCase :Optional[int] = None lowerCamelCase :int = 1 lowerCamelCase :Optional[Union[str, bool]] = None lowerCamelCase :bool = False lowerCamelCase :Optional[Dict] = None lowerCamelCase :Optional[str] = None def UpperCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(lowerCAmelCase_ ) for k, v in self.__dict__.items()} )
83
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
83
1
from __future__ import annotations _SCREAMING_SNAKE_CASE = list[tuple[int, int]] _SCREAMING_SNAKE_CASE = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _SCREAMING_SNAKE_CASE = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Optional[int]: _A = pos_x _A = pos_y _A = (pos_y, pos_x) _A = goal_x _A = goal_y _A = g_cost _A = parent _A = self.calculate_heuristic() def UpperCAmelCase ( self ) -> float: _A = abs(self.pos_x - self.goal_x ) _A = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , lowerCAmelCase_ ) -> bool: return self.f_cost < other.f_cost class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: _A = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase_ ) _A = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCAmelCase_ ) _A = [self.start] _A = [] _A = False def UpperCAmelCase ( self ) -> Path | None: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() _A = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: _A = True return self.retrace_path(lowerCAmelCase_ ) self.closed_nodes.append(lowerCAmelCase_ ) _A = self.get_successors(lowerCAmelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCAmelCase_ ) else: # retrieve the best current path _A = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCAmelCase_ ) else: self.open_nodes.append(lowerCAmelCase_ ) if not self.reached: return [self.start.pos] return None def UpperCAmelCase ( self , lowerCAmelCase_ ) -> list[Node]: _A = [] for action in delta: _A = parent.pos_x + action[1] _A = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) ) return successors def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Path: _A = node _A = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) _A = current_node.parent path.reverse() return path if __name__ == "__main__": _SCREAMING_SNAKE_CASE = (0, 0) _SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') _SCREAMING_SNAKE_CASE = GreedyBestFirst(init, goal) _SCREAMING_SNAKE_CASE = greedy_bf.search() if path: for pos_x, pos_y in path: _SCREAMING_SNAKE_CASE = 2 for elem in grid: print(elem)
83
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple: _A , _A = {}, {} if padding is not None: _A = padding if truncation is not None: _A = truncation if top_k is not None: _A = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]: if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = {"""image""": image, """question""": question} else: _A = image _A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) return results def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any: _A = load_image(inputs["""image"""] ) _A = self.tokenizer( inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ ) _A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCAmelCase_ ) return model_inputs def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = self.model(**lowerCAmelCase_ ) return model_outputs def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]: if top_k > self.model.config.num_labels: _A = self.model.config.num_labels if self.framework == "pt": _A = model_outputs.logits.sigmoid()[0] _A , _A = probs.topk(lowerCAmelCase_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _A = scores.tolist() _A = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
83
1
def snake_case ( snake_case__ :int = 1_000) -> int: _A = 3 _A = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
83
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]: _A = {} if train_file is not None: _A = [train_file] if eval_file is not None: _A = [eval_file] if test_file is not None: _A = [test_file] _A = datasets.load_dataset("""csv""" , data_files=snake_case__) _A = list(ds[list(files.keys())[0]].features.keys()) _A = features_name.pop(snake_case__) _A = list(set(ds[list(files.keys())[0]][label_name])) _A = {label: i for i, label in enumerate(snake_case__)} _A = tokenizer.model_input_names _A = {} if len(snake_case__) == 1: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , ) elif len(snake_case__) == 2: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST]))) return train_ds, val_ds, test_ds, labelaid _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class a : """simple docstring""" lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} ) lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training 
file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} ) lowerCamelCase :int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCamelCase :bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def snake_case ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) _A , _A , _A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' """ --overwrite_output_dir to overcome.""") # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info( F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, ''' F'''16-bits training: {training_args.fpaa}''') logger.info(F'''Training/evaluation parameters {training_args}''') # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A , _A , _A , _A = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _A = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , ) def compute_metrics(snake_case__ :EvalPrediction) -> Dict: _A = np.argmax(p.predictions , axis=1) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _A = TFTrainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir) # Evaluation _A = {} if training_args.do_eval: logger.info("""*** Evaluate ***""") _A = trainer.evaluate() _A = os.path.join(training_args.output_dir , """eval_results.txt""") with open(snake_case__ , """w""") as writer: logger.info("""***** Eval results *****""") for key, value in result.items(): logger.info(F''' {key} = {value}''') writer.write(F'''{key} = {value}\n''') results.update(snake_case__) return results if __name__ == "__main__": main()
83
1
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Optional[int] = '''vision-encoder-decoder''' lowerCamelCase :Tuple = True def __init__( self , **lowerCAmelCase_ ) -> Tuple: super().__init__(**lowerCAmelCase_ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F'''A configuraton of type {self.model_type} cannot be instantiated because ''' F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) _A = kwargs.pop("""encoder""" ) _A = encoder_config.pop("""model_type""" ) _A = kwargs.pop("""decoder""" ) _A = decoder_config.pop("""model_type""" ) _A = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ ) _A = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ ) _A = True @classmethod def UpperCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> PretrainedConfig: logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) _A = True _A = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> List[str]: _A = copy.deepcopy(self.__dict__ ) _A = self.encoder.to_dict() _A = self.decoder.to_dict() _A = self.__class__.model_type return output class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Tuple = version.parse('''1.11''' ) @property def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase ( self ) -> float: return 1E-4 @property def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class a ( __lowerCAmelCase ): """simple docstring""" @property def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _A = OrderedDict() _A = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _A = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _A = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]: import torch _A = OrderedDict() _A = super().generate_dummy_inputs( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) _A , _A = dummy_input["""input_ids"""].shape _A = (batch, encoder_sequence, self._config.encoder_hidden_size) _A = dummy_input.pop("""input_ids""" ) _A = dummy_input.pop("""attention_mask""" ) _A = torch.zeros(lowerCAmelCase_ ) return common_inputs class a ( __lowerCAmelCase ): """simple docstring""" @property def UpperCAmelCase ( self ) -> None: pass def UpperCAmelCase ( self , lowerCAmelCase_ ) -> OnnxConfig: return VisionEncoderDecoderEncoderOnnxConfig(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "default" ) -> OnnxConfig: _A = encoder_config.hidden_size return 
VisionEncoderDecoderDecoderOnnxConfig(lowerCAmelCase_ , lowerCAmelCase_ )
83
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Union[str, Any] = '''speech_to_text''' lowerCamelCase :List[str] = ['''past_key_values'''] lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple: _A = vocab_size _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = max_source_positions _A = max_target_positions _A = num_conv_layers _A = list(lowerCAmelCase_ ) _A = conv_channels _A = input_feat_per_channel _A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
83
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = { 'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel', 'ResNetBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'FlaxResNetForImageClassification', 'FlaxResNetModel', 'FlaxResNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure)
83
from __future__ import annotations from collections.abc import Callable def snake_case ( snake_case__ :Callable[[int | float], int | float] , snake_case__ :int | float , snake_case__ :int | float , snake_case__ :int = 100 , ) -> float: _A = x_start _A = fnc(snake_case__) _A = 0.0 for _ in range(snake_case__): # Approximates small segments of curve as linear and solve # for trapezoidal area _A = (x_end - x_start) / steps + xa _A = fnc(snake_case__) area += abs(fxa + fxa) * (xa - xa) / 2 # Increment step _A = xa _A = fxa return area if __name__ == "__main__": def snake_case ( snake_case__ :Tuple) -> List[str]: return x**3 + x**2 print('f(x) = x^3 + x^2') print('The area between the curve, x = -5, x = 5 and the x axis is:') _SCREAMING_SNAKE_CASE = 10 while i <= 100_000: print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
83
1
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar _SCREAMING_SNAKE_CASE = TypeVar('T') class a ( Generic[T] ): """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> Dict: _A = data _A = None def __str__( self ) -> str: return F'''{self.data}''' class a ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: _A = None def __iter__( self ) -> Iterator[T]: _A = self.top while node: yield node.data _A = node.next def __str__( self ) -> str: return "->".join([str(lowerCAmelCase_ ) for item in self] ) def __len__( self ) -> int: return len(tuple(iter(self ) ) ) def UpperCAmelCase ( self ) -> bool: return self.top is None def UpperCAmelCase ( self , lowerCAmelCase_ ) -> None: _A = Node(lowerCAmelCase_ ) if not self.is_empty(): _A = self.top _A = node def UpperCAmelCase ( self ) -> T: if self.is_empty(): raise IndexError("""pop from empty stack""" ) assert isinstance(self.top , lowerCAmelCase_ ) _A = self.top _A = self.top.next return pop_node.data def UpperCAmelCase ( self ) -> T: if self.is_empty(): raise IndexError("""peek from empty stack""" ) assert self.top is not None return self.top.data def UpperCAmelCase ( self ) -> None: _A = None if __name__ == "__main__": from doctest import testmod testmod()
83
import numpy as np import qiskit def snake_case ( snake_case__ :int = 8 , snake_case__ :int | None = None) -> str: _A = np.random.default_rng(seed=snake_case__) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. _A = 6 * key_len # Measurement basis for Alice's qubits. _A = rng.integers(2 , size=snake_case__) # The set of states Alice will prepare. _A = rng.integers(2 , size=snake_case__) # Measurement basis for Bob's qubits. _A = rng.integers(2 , size=snake_case__) # Quantum Circuit to simulate BB84 _A = qiskit.QuantumCircuit(snake_case__ , name="""BB84""") # Alice prepares her qubits according to rules above. for index, _ in enumerate(snake_case__): if alice_state[index] == 1: bbaa_circ.x(snake_case__) if alice_basis[index] == 1: bbaa_circ.h(snake_case__) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(snake_case__): if bob_basis[index] == 1: bbaa_circ.h(snake_case__) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. _A = qiskit.Aer.get_backend("""aer_simulator""") # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. _A = qiskit.execute(snake_case__ , snake_case__ , shots=1 , seed_simulator=snake_case__) # Returns the result of measurement. _A = job.result().get_counts(snake_case__).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. _A = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( snake_case__ , snake_case__ , snake_case__) if alice_basis_bit == bob_basis_bit ]) # Get final key. Pad with 0 if too short, otherwise truncate. _A = gen_key[:key_len] if len(snake_case__) >= key_len else gen_key.ljust(snake_case__ , """0""") return key if __name__ == "__main__": print(F'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
83
1
from __future__ import annotations _SCREAMING_SNAKE_CASE = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] _SCREAMING_SNAKE_CASE = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def snake_case ( snake_case__ :list[float]) -> list[float]: _A = [] _A = len(snake_case__) for i in range(snake_case__): _A = -1 for j in range(i + 1 , snake_case__): if arr[i] < arr[j]: _A = arr[j] break result.append(snake_case__) return result def snake_case ( snake_case__ :list[float]) -> list[float]: _A = [] for i, outer in enumerate(snake_case__): _A = -1 for inner in arr[i + 1 :]: if outer < inner: _A = inner break result.append(snake_case__) return result def snake_case ( snake_case__ :list[float]) -> list[float]: _A = len(snake_case__) _A = [] _A = [-1] * arr_size for index in reversed(range(snake_case__)): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: _A = stack[-1] stack.append(arr[index]) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) _SCREAMING_SNAKE_CASE = ( 'from __main__ import arr, next_greatest_element_slow, ' 'next_greatest_element_fast, next_greatest_element' ) print( 'next_greatest_element_slow():', timeit('next_greatest_element_slow(arr)', setup=setup), ) print( 'next_greatest_element_fast():', timeit('next_greatest_element_fast(arr)', setup=setup), ) print( ' next_greatest_element():', timeit('next_greatest_element(arr)', setup=setup), )
83
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def snake_case ( snake_case__ :int) -> Optional[int]: return EnvironmentCommand() def snake_case ( snake_case__ :Tuple) -> List[str]: return EnvironmentCommand(args.accelerate_config_file) class a ( __lowerCAmelCase ): """simple docstring""" @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple: _A = parser.add_parser("""env""" ) download_parser.set_defaults(func=lowerCAmelCase_ ) download_parser.add_argument( """--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , ) download_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None: _A = accelerate_config_file def UpperCAmelCase ( self ) -> Dict: _A = """not installed""" if is_safetensors_available(): import safetensors _A = safetensors.__version__ elif importlib.util.find_spec("""safetensors""" ) is not None: import safetensors _A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' _A = """not installed""" _A = _A = """not found""" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file _A = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ): _A = load_config_from_file(self._accelerate_config_file ).to_dict() _A = ( """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else F'''\t{accelerate_config}''' ) _A = """not installed""" _A = """NA""" if is_torch_available(): import torch _A = torch.__version__ _A = torch.cuda.is_available() _A = """not installed""" _A = """NA""" if is_tf_available(): import tensorflow as tf _A = tf.__version__ try: # deprecated in v2.1 _A = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool _A = bool(tf.config.list_physical_devices("""GPU""" ) ) _A = """not installed""" _A = """not installed""" _A = """not installed""" _A = """NA""" if is_flax_available(): import flax import jax import jaxlib _A = flax.__version__ _A = jax.__version__ _A = jaxlib.__version__ _A = jax.lib.xla_bridge.get_backend().platform _A = { """`transformers` version""": version, """Platform""": platform.platform(), """Python version""": platform.python_version(), """Huggingface_hub version""": huggingface_hub.__version__, """Safetensors version""": F'''{safetensors_version}''', """Accelerate version""": F'''{accelerate_version}''', """Accelerate config""": F'''{accelerate_config_str}''', """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''', """Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''', """Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''', """Jax version""": F'''{jax_version}''', """JaxLib version""": F'''{jaxlib_version}''', """Using GPU in script?""": """<fill in>""", """Using distributed or parallel set-up in script?""": """<fill in>""", } print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" ) print(self.format_dict(lowerCAmelCase_ ) ) 
return info @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple: return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
83
1
import math import sys import cva import numpy as np def snake_case ( snake_case__ :np.ndarray , snake_case__ :float) -> np.ndarray: # For applying gaussian function for each element in matrix. _A = math.sqrt(snake_case__) _A = 1 / (sigma * math.sqrt(2 * math.pi)) return cons * np.exp(-((img / sigma) ** 2) * 0.5) def snake_case ( snake_case__ :np.ndarray , snake_case__ :int , snake_case__ :int , snake_case__ :int) -> np.ndarray: _A = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def snake_case ( snake_case__ :int , snake_case__ :float) -> np.ndarray: # Creates a gaussian kernel of given dimension. _A = np.zeros((kernel_size, kernel_size)) for i in range(0 , snake_case__): for j in range(0 , snake_case__): _A = math.sqrt( abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2) return vec_gaussian(snake_case__ , snake_case__) def snake_case ( snake_case__ :np.ndarray , snake_case__ :float , snake_case__ :float , snake_case__ :int , ) -> np.ndarray: _A = np.zeros(img.shape) _A = get_gauss_kernel(snake_case__ , snake_case__) _A , _A = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2): for j in range(kernel_size // 2 , size_y - kernel_size // 2): _A = get_slice(snake_case__ , snake_case__ , snake_case__ , snake_case__) _A = img_s - img_s[kernel_size // 2, kernel_size // 2] _A = vec_gaussian(snake_case__ , snake_case__) _A = np.multiply(snake_case__ , snake_case__) _A = np.multiply(snake_case__ , snake_case__) _A = np.sum(snake_case__) / np.sum(snake_case__) _A = val return imga def snake_case ( snake_case__ :list) -> tuple: _A = args[1] if args[1:] else """../image_data/lena.jpg""" _A = float(args[2]) if args[2:] else 1.0 _A = float(args[3]) if args[3:] else 1.0 if args[4:]: _A = int(args[4]) _A = kernel_size + abs(kernel_size % 2 - 1) else: _A = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parse_args(sys.argv) _SCREAMING_SNAKE_CASE = cva.imread(filename, 0) cva.imshow('input image', img) _SCREAMING_SNAKE_CASE = img / 255 _SCREAMING_SNAKE_CASE = out.astype('float32') _SCREAMING_SNAKE_CASE = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) _SCREAMING_SNAKE_CASE = out * 255 _SCREAMING_SNAKE_CASE = np.uinta(out) cva.imshow('output image', out) cva.waitKey(0) cva.destroyAllWindows()
83
import colorsys from PIL import Image # type: ignore def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float: _A = x _A = y for step in range(snake_case__): # noqa: B007 _A = a * a - b * b + x _A = 2 * a * b + y _A = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def snake_case ( snake_case__ :float) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def snake_case ( snake_case__ :float) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1)) def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image: _A = Image.new("""RGB""" , (image_width, image_height)) _A = img.load() # loop through the image-coordinates for image_x in range(snake_case__): for image_y in range(snake_case__): # determine the figure-coordinates based on the image-coordinates _A = figure_width / image_width * image_height _A = figure_center_x + (image_x / image_width - 0.5) * figure_width _A = figure_center_y + (image_y / image_height - 0.5) * figure_height _A = get_distance(snake_case__ , snake_case__ , snake_case__) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: _A = get_color_coded_rgb(snake_case__) else: _A = get_black_and_white_rgb(snake_case__) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _SCREAMING_SNAKE_CASE = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
83
1
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _SCREAMING_SNAKE_CASE = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def snake_case ( snake_case__ :Any) -> str: _A = {} state_dict.pop("""pixel_mean""" , snake_case__) state_dict.pop("""pixel_std""" , snake_case__) _A = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*""" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _A = key.replace(snake_case__ , snake_case__) if re.match(snake_case__ , snake_case__): _A = int(re.match(snake_case__ , snake_case__).group(2)) if layer_nb == 0: _A = key.replace("""layers.0""" , """proj_in""") elif layer_nb == 1: _A = key.replace("""layers.1""" , """layers.0""") elif layer_nb == 2: _A = key.replace("""layers.2""" , """proj_out""") _A = value _A = model_state_dict[ """prompt_encoder.shared_embedding.positional_embedding""" ] return model_state_dict def snake_case ( snake_case__ :Any , snake_case__ :Union[str, Any] , snake_case__ :Dict , snake_case__ :Optional[Any]="ybelkada/segment-anything") -> Union[str, Any]: _A = hf_hub_download(snake_case__ , F'''checkpoints/{model_name}.pth''') if "sam_vit_b" in model_name: _A = SamConfig() elif "sam_vit_l" in model_name: _A = SamVisionConfig( hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) _A = SamConfig( vision_config=snake_case__ , ) elif "sam_vit_h" in model_name: _A = SamVisionConfig( hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) _A = SamConfig( vision_config=snake_case__ , ) _A = torch.load(snake_case__ , map_location="""cpu""") _A = replace_keys(snake_case__) _A = SamImageProcessor() _A = SamProcessor(image_processor=snake_case__) _A = SamModel(snake_case__) hf_model.load_state_dict(snake_case__) _A = hf_model.to("""cuda""") _A = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png""" _A = Image.open(requests.get(snake_case__ , stream=snake_case__).raw).convert("""RGB""") _A = [[[400, 650]]] _A = [[1]] _A = processor(images=np.array(snake_case__) , return_tensors="""pt""").to("""cuda""") with torch.no_grad(): _A = hf_model(**snake_case__) _A = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_8902_5115_9668 _A = processor( images=np.array(snake_case__) , input_points=snake_case__ , 
input_labels=snake_case__ , return_tensors="""pt""").to("""cuda""") with torch.no_grad(): _A = hf_model(**snake_case__) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712_6030_9219_3604 _A = ((75, 275, 1_725, 850),) _A = processor(images=np.array(snake_case__) , input_boxes=snake_case__ , return_tensors="""pt""").to("""cuda""") with torch.no_grad(): _A = hf_model(**snake_case__) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686_0156_0592_6514 # Test with 2 points and 1 image. _A = [[[400, 650], [800, 650]]] _A = [[1, 1]] _A = processor( images=np.array(snake_case__) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors="""pt""").to("""cuda""") with torch.no_grad(): _A = hf_model(**snake_case__) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936_0477_9243_4692 if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() _SCREAMING_SNAKE_CASE = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
83
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' _SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' _SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]: _A = {doc: key_lines} _A = {doc: sys_lines} _A = {} _A = 0 _A = 0 _A = 0 _A = 0 _A = 0 _A = 0 _A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__) key_singletons_num += singletons_num if NP_only or min_span: _A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__) _A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__) sys_singletons_num += singletons_num if NP_only or min_span: _A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__) if remove_nested: _A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _A = reader.get_mention_assignments(snake_case__ , snake_case__) _A = reader.get_mention_assignments(snake_case__ , snake_case__) _A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''') logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''') if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' """files, respectively""") return doc_coref_infos def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int: _A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , 
snake_case__ , snake_case__) _A = {} _A = 0 _A = 0 for name, metric in metrics: _A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa}) logger.info( name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _A = (conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''') output_scores.update({"""conll_score""": conll}) return output_scores def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]: _A = False for line in key_lines: if not line.startswith("""#"""): if len(line.split()) > 6: _A = line.split()[5] if not parse_col == "-": _A = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): """simple docstring""" def UpperCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]: _A = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _A = util.check_gold_parse_annotation(lowerCAmelCase_ ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _A = evaluate( key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , ) return score
83
1
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class a ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> Tuple: _A = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" ) _A = AutoTokenizer.from_pretrained("""google/mt5-small""" ) _A = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids _A = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids _A = shift_tokens_right(lowerCAmelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id ) _A = model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ ).logits _A = optax.softmax_cross_entropy(lowerCAmelCase_ , onehot(lowerCAmelCase_ , logits.shape[-1] ) ).mean() _A = -(labels.shape[-1] * loss.item()) _A = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
83
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } _SCREAMING_SNAKE_CASE = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } _SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512} def snake_case ( snake_case__ :Tuple) -> str: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char)) _A = char _A = set(snake_case__) return pairs class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :List[Any] = VOCAB_FILES_NAMES lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase :int = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int: super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ ) with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle: _A = json.load(lowerCAmelCase_ ) _A = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: _A = merges_handle.read().split("""\n""" )[1:-1] _A = [tuple(merge.split() ) for merge in merges] _A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _A = {} @property def UpperCAmelCase ( self ) -> int: return len(self.encoder ) def UpperCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: if token in self.cache: return self.cache[token] _A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ ) _A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ ) _A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ ) if "\n" in token: _A = token.replace("""\n""" , """ __newln__""" ) _A = token.split(""" """ ) _A = [] for token in tokens: if not len(lowerCAmelCase_ ): continue _A = token.lower() _A = tuple(lowerCAmelCase_ ) _A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) _A = get_pairs(lowerCAmelCase_ ) if not pairs: words.append(lowerCAmelCase_ ) continue while True: _A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(lowerCAmelCase_ ): try: _A = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) new_word.extend(word[i:j] ) _A = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(lowerCAmelCase_ ) _A = new_word if len(lowerCAmelCase_ ) == 1: break else: _A = get_pairs(lowerCAmelCase_ ) _A = """@@ """.join(lowerCAmelCase_ ) _A = 
word[:-4] _A = word words.append(lowerCAmelCase_ ) return " ".join(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _A = [] _A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: _A = token.lower() return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: _A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" ) _A = 0 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) _A = token_index writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file
83
1
from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case ( ) -> List[str]: _A , _A = 9, 14 # noqa: F841 _A = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _A = defaultdict(snake_case__) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost]) adjancency[nodea].append([nodea, cost]) _A = mst(snake_case__) _A = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _A = tuple(answer[:2]) _A = tuple(edge[::-1]) assert edge in result or reverse in result
83
_SCREAMING_SNAKE_CASE = { 'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/' } # Exclamation mark is not in ITU-R recommendation # fmt: on _SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()} def snake_case ( snake_case__ :str) -> str: return " ".join(MORSE_CODE_DICT[char] for char in message.upper()) def snake_case ( snake_case__ :str) -> str: return "".join(REVERSE_DICT[char] for char in message.split()) def snake_case ( ) -> None: _A = """Morse code here!""" print(snake_case__) _A = encrypt(snake_case__) print(snake_case__) _A = decrypt(snake_case__) print(snake_case__) if __name__ == "__main__": main()
83
1
'''simple docstring''' import random def snake_case ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :List[str]) -> Union[str, Any]: _A = a[left_index] _A = left_index + 1 for j in range(left_index + 1 , SCREAMING_SNAKE_CASE_): if a[j] < pivot: _A , _A = a[i], a[j] i += 1 _A , _A = a[i - 1], a[left_index] return i - 1 def snake_case ( snake_case__ :int , snake_case__ :List[Any] , snake_case__ :Dict) -> Union[str, Any]: if left < right: _A = random.randint(SCREAMING_SNAKE_CASE_ , right - 1) _A , _A = ( a[left], a[pivot], ) # switches the pivot with the left most bound _A = partition(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) quick_sort_random( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # recursive quicksort to the left of the pivot point quick_sort_random( SCREAMING_SNAKE_CASE_ , pivot_index + 1 , SCREAMING_SNAKE_CASE_) # recursive quicksort to the right of the pivot point def snake_case ( ) -> List[Any]: _A = input("""Enter numbers separated by a comma:\n""").strip() _A = [int(SCREAMING_SNAKE_CASE_) for item in user_input.split(""",""")] quick_sort_random(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_)) print(SCREAMING_SNAKE_CASE_) if __name__ == "__main__": main()
700
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { 'configuration_jukebox': [ 'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'JukeboxConfig', 'JukeboxPriorConfig', 'JukeboxVQVAEConfig', ], 'tokenization_jukebox': ['JukeboxTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'JukeboxModel', 'JukeboxPreTrainedModel', 'JukeboxVQVAE', 'JukeboxPrior', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
83
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json', } class a ( _snake_case ): """simple docstring""" lowerCamelCase :Union[str, Any] = '''blip_2_vision_model''' def __init__( self , lowerCAmelCase_=14_08 , lowerCAmelCase_=61_44 , lowerCAmelCase_=39 , lowerCAmelCase_=16 , lowerCAmelCase_=2_24 , lowerCAmelCase_=14 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0_0001 , lowerCAmelCase_=0.0 , lowerCAmelCase_=1E-10 , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> int: super().__init__(**lowerCAmelCase__ ) _A = hidden_size _A = intermediate_size _A = num_hidden_layers _A = num_attention_heads _A = patch_size _A = image_size _A = initializer_range _A = attention_dropout _A = layer_norm_eps _A = hidden_act _A = qkv_bias @classmethod def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowerCAmelCase__ ) _A , _A = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("""model_type""" ) == "blip-2": _A = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) class a ( _snake_case ): """simple docstring""" lowerCamelCase :List[str] = '''blip_2_qformer''' def __init__( self , lowerCAmelCase_=3_05_22 , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=30_72 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=0 , lowerCAmelCase_="absolute" , lowerCAmelCase_=2 , lowerCAmelCase_=14_08 , **lowerCAmelCase_ , ) -> Union[str, Any]: super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = cross_attention_frequency _A = encoder_hidden_size @classmethod def UpperCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowerCAmelCase__ ) _A , _A = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("""model_type""" ) == "blip-2": _A = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) class a ( _snake_case ): """simple docstring""" lowerCamelCase :List[Any] = '''blip-2''' lowerCamelCase :Union[str, Any] = True def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=32 , **lowerCAmelCase_ ) -> Optional[int]: super().__init__(**lowerCAmelCase__ ) if vision_config is None: _A = {} logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" ) if qformer_config is None: _A = {} logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" ) if text_config is None: _A = {} logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" ) _A = BlipaVisionConfig(**lowerCAmelCase__ ) _A = BlipaQFormerConfig(**lowerCAmelCase__ ) _A = text_config["""model_type"""] if """model_type""" in text_config else """opt""" _A = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ ) _A = self.text_config.tie_word_embeddings _A = self.text_config.is_encoder_decoder _A = num_query_tokens _A = self.vision_config.hidden_size _A = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _A = 1.0 _A = 0.02 @classmethod def UpperCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ , ) -> str: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , ) def UpperCAmelCase ( self ) -> Optional[Any]: _A = copy.deepcopy(self.__dict__ ) _A = self.vision_config.to_dict() _A = self.qformer_config.to_dict() _A = self.text_config.to_dict() _A = self.__class__.model_type return output
701
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum''' lowerCamelCase :Tuple = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) lowerCamelCase :List[Any] = '''summarizer''' lowerCamelCase :List[str] = AutoTokenizer lowerCamelCase :Dict = AutoModelForSeqaSeqLM lowerCamelCase :int = ['''text'''] lowerCamelCase :List[Any] = ['''text'''] def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: return self.model.generate(**lowerCAmelCase_ )[0] def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
83
0
from __future__ import annotations import math def snake_case ( snake_case__ :List[Any]) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True _SCREAMING_SNAKE_CASE = [num for num in range(3, 100_001, 2) if not is_prime(num)] def snake_case ( snake_case__ :Optional[Any]) -> list[int]: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE): raise ValueError("""n must be an integer""") if n <= 0: raise ValueError("""n must be >= 0""") _A = [] for num in range(len(_SCREAMING_SNAKE_CASE)): _A = 0 while 2 * i * i <= odd_composites[num]: _A = odd_composites[num] - 2 * i * i if is_prime(_SCREAMING_SNAKE_CASE): break i += 1 else: list_nums.append(odd_composites[num]) if len(_SCREAMING_SNAKE_CASE) == n: return list_nums return [] def snake_case ( ) -> int: return compute_nums(1)[0] if __name__ == "__main__": print(F'''{solution() = }''')
702
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _SCREAMING_SNAKE_CASE = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def snake_case ( snake_case__ :Union[str, Any]) -> Dict: _A = torch.load(snake_case__ , map_location="""cpu""") return sd def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]: _A = OrderedDict() _A = torch.arange(config.max_position_embeddings).expand((1, -1)) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _A = key for name_pair in rename_keys_prefix: _A = new_key.replace(name_pair[0] , name_pair[1]) _A = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _A = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int: assert ( checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: _A = """pretraining""" if "vcr" in checkpoint_path: _A = {"""visual_embedding_dim""": 512} elif "vqa_advanced" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} elif "vqa" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} elif "nlvr" in checkpoint_path: _A = {"""visual_embedding_dim""": 1_024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''') else: if "vcr" in checkpoint_path: _A = {"""visual_embedding_dim""": 512} _A = """multichoice""" elif "vqa_advanced" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} _A = """vqa_advanced""" elif "vqa" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129} _A = """vqa""" elif "nlvr" in checkpoint_path: _A = { """visual_embedding_dim""": 1_024, """num_labels""": 2, } _A = """nlvr""" _A = VisualBertConfig(**snake_case__) # Load State Dict _A = load_state_dict(snake_case__) _A = get_new_dict(snake_case__ , snake_case__) if model_type == "pretraining": _A = VisualBertForPreTraining(snake_case__) elif model_type == "vqa": _A = VisualBertForQuestionAnswering(snake_case__) elif model_type == "nlvr": _A = VisualBertForVisualReasoning(snake_case__) elif model_type == "multichoice": _A = VisualBertForMultipleChoice(snake_case__) model.load_state_dict(snake_case__) # Save Checkpoints Path(snake_case__).mkdir(exist_ok=snake_case__) model.save_pretrained(snake_case__) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', 
type=str, help='Path to the output PyTorch model.') _SCREAMING_SNAKE_CASE = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
83
0
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } _SCREAMING_SNAKE_CASE = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } _SCREAMING_SNAKE_CASE = {"facebook/blenderbot_small-90M": 512} def snake_case ( snake_case__ :Dict) -> Optional[Any]: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char)) _A = char _A = set(snake_case__) return pairs class a ( _SCREAMING_SNAKE_CASE ): """simple docstring""" lowerCamelCase :Optional[Any] = VOCAB_FILES_NAMES lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase :Any = ["input_ids", "attention_mask"] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> List[Any]: super().__init__(unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , **A_ ) with open(A_ , encoding="""utf-8""" ) as vocab_handle: _A = json.load(A_ ) _A = {v: k for k, v in self.encoder.items()} with open(A_ , encoding="""utf-8""" ) as merges_handle: _A = merges_handle.read().split("""\n""" )[1:-1] _A = [tuple(merge.split() ) for merge in merges] _A = dict(zip(A_ , range(len(A_ ) ) ) ) _A = {} @property def UpperCAmelCase ( self ) -> int: return len(self.encoder ) def UpperCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: if token in self.cache: return self.cache[token] _A = re.sub("""([.,!?()])""" , r""" \1""" , A_ ) _A = re.sub("""(\')""" , r""" \1 """ , A_ ) _A = re.sub(r"""\s{2,}""" , """ """ , A_ ) if "\n" in token: _A = token.replace("""\n""" , """ __newln__""" ) _A = token.split(""" """ ) _A = [] for token in tokens: if not len(A_ ): continue _A = token.lower() _A = tuple(A_ ) _A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) _A = get_pairs(A_ ) if not pairs: words.append(A_ ) continue while True: _A = min(A_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(A_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(A_ ): try: _A = word.index(A_ , A_ ) new_word.extend(word[i:j] ) _A = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(A_ ) _A = new_word if len(A_ ) == 1: break else: _A = get_pairs(A_ ) _A = """@@ """.join(A_ ) _A = word[:-4] _A = word words.append(A_ ) return " ".join(A_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _A = [] _A = re.findall(r"""\S+\n?""" , A_ ) for token in words: split_tokens.extend(list(self.bpe(A_ ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: _A = token.lower() 
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.decoder.get(A_ , self.unk_token ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: _A = """ """.join(A_ ).replace("""@@ """ , """""" ).strip() return out_string def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _A = os.path.join( A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(A_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + """\n""" ) _A = 0 with open(A_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) _A = token_index writer.write(""" """.join(A_ ) + """\n""" ) index += 1 return vocab_file, merge_file
703
from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class a ( __lowerCAmelCase ): """simple docstring""" def UpperCAmelCase ( self ) -> List[str]: return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def UpperCAmelCase ( self ) -> Optional[int]: _A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]} return Dataset.from_dict(lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self._create_example_records() _A = Dataset.from_list(lowerCAmelCase_ ) self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] ) for i, r in enumerate(lowerCAmelCase_ ): self.assertDictEqual(lowerCAmelCase_ , example_records[i] ) def UpperCAmelCase ( self ) -> str: _A = self._create_example_records() _A = Dataset.from_list(lowerCAmelCase_ ) _A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns _A = [{"""col_1""": 1}, {"""col_2""": """x"""}] _A = Dataset.from_list(lowerCAmelCase_ ) self.assertDictEqual(dset[0] , {"""col_1""": 1} ) self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record _A = [{"""col_1""": []}, {"""col_1""": [1, 2]}] _A = Dataset.from_list(lowerCAmelCase_ ) self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) ) def UpperCAmelCase ( self ) -> Any: _A = Dataset.from_list([] ) self.assertEqual(len(lowerCAmelCase_ ) , 0 ) self.assertListEqual(dset.column_names , [] )
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""")

            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""", split="""validation""")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["""xvector"""]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
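# A hedged usage sketch for the tool above: instantiating it downloads the
# `microsoft/speecht5_*` checkpoints and the speaker-embedding dataset, so it is
# shown commented out; the import path assumes the public `transformers.tools` export.
# from transformers.tools import TextToSpeechTool
# reader = TextToSpeechTool()
# waveform = reader("Hello, how are you today?")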
def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, via a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(F'''{solution() = }''')
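# Quick sanity check for the totient sum above: for limit=10 the exact value is
# phi(2)+...+phi(10) = 1+2+2+4+2+6+4+6+4 = 31 (printed rather than asserted,
# since the accumulation is done in floats).
print(solution(10))  # expected: 31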
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ]
        )
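# A minimal usage sketch for the config above (assumes the public
# `transformers.IBertConfig` export; the field values here are illustrative):
# from transformers import IBertConfig
# config = IBertConfig(hidden_size=128, num_hidden_layers=2, quant_mode=True)
# print(config.model_type, config.quant_mode, config.force_dequant)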
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = relative_attention _A = position_biased_input _A = pos_att_type _A = scope def UpperCAmelCase ( self ) -> Dict: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self ) -> Optional[int]: return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _A = DebertaVaModel(config=lowerCAmelCase_ ) 
model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _A = DebertaVaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _A = self.num_labels _A = DebertaVaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = self.num_labels _A = DebertaVaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: _A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( __lowerCAmelCase , __lowerCAmelCase 
, unittest.TestCase ): """simple docstring""" lowerCamelCase :int = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase :str = ( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase :str = True lowerCamelCase :Union[str, Any] = False lowerCamelCase :Optional[int] = False lowerCamelCase :List[str] = False lowerCamelCase :str = False def UpperCAmelCase ( self ) -> Optional[int]: _A = DebertaVaModelTester(self ) _A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> List[str]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> int: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ ) @slow def UpperCAmelCase ( self ) -> Any: for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DebertaVaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase ( self ) -> int: pass @slow def UpperCAmelCase ( self ) -> Optional[Any]: _A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" ) _A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) _A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. _A = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def snake_case ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Dict) -> List[str]: _A = AutoConfig.from_pretrained(lowerCamelCase_) _A = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_) _A = checkpoints.load_tax_checkpoint(lowerCamelCase_) _A = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": _A = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": _A = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _A = 'TransientGlobalSelfAttention' else: raise ValueError( """Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`""" """ attribute with a value from [\'local\', \'transient-global].""") # Encoder for layer_index in range(config.num_layers): _A = F'''layers_{str(lowerCamelCase_)}''' # Self-Attention _A = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] _A = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] _A = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] _A = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _A = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization _A = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: _A = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] _A = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: _A = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] _A = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _A = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _A = flax_model.params['encoder']['block'][str(lowerCamelCase_)]['layer'] _A = tax_attention_key _A = tax_attention_out _A = tax_attention_query _A = tax_attention_value _A = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _A = tax_global_layer_norm if split_mlp_wi: _A = tax_mlp_wi_a _A = tax_mlp_wi_a else: _A = tax_mlp_wi _A = tax_mlp_wo _A = tax_mlp_layer_norm _A = flax_model_encoder_layer_block # Only for layer 0: _A = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T _A = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _A = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T _A = tax_encoder_global_rel_embedding # Assigning _A = tax_model['target']['encoder']['encoder_norm']['scale'] _A = tax_encoder_norm # Decoder for layer_index in range(config.num_layers): _A = F'''layers_{str(lowerCamelCase_)}''' # Self-Attention _A = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] _A = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] _A = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] _A = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization _A = 
tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention _A = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] _A = tax_enc_dec_attention_module['key']['kernel'] _A = tax_enc_dec_attention_module['out']['kernel'] _A = tax_enc_dec_attention_module['query']['kernel'] _A = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization _A = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: _A = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] _A = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: _A = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] _A = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization _A = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning _A = flax_model.params['decoder']['block'][str(lowerCamelCase_)]['layer'] _A = tax_attention_key _A = tax_attention_out _A = tax_attention_query _A = tax_attention_value _A = tax_pre_attention_layer_norm _A = tax_enc_dec_attention_key _A = tax_enc_dec_attention_out _A = tax_enc_dec_attention_query _A = tax_enc_dec_attention_value _A = tax_cross_layer_norm if split_mlp_wi: _A = tax_mlp_wi_a _A = tax_mlp_wi_a else: _A = tax_mlp_wi _A = tax_mlp_wo _A = txa_mlp_layer_norm _A = flax_model_decoder_layer_block # Decoder Normalization _A = tax_model['target']['decoder']['decoder_norm']['scale'] _A = txa_decoder_norm # Only for layer 0: _A = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T _A = tax_decoder_rel_embedding # Token Embeddings _A = tax_model['target']['token_embedder']['embedding'] _A = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _A = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(lowerCamelCase_) print("""T5X Model was sucessfully converted!""") if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
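# A hedged example invocation of the converter above (the script name, paths and
# config name are placeholders):
# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path /path/to/t5x_checkpoint \
#     --config_name google/t5-v1_1-small \
#     --flax_dump_folder_path /path/to/flax_dump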
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("""Truth Table of NOR Gate:""")
    print("""| Input 1 | Input 2 | Output |""")
    print(F'''| 0 | 0 | {nor_gate(0, 0)} |''')
    print(F'''| 0 | 1 | {nor_gate(0, 1)} |''')
    print(F'''| 1 | 0 | {nor_gate(1, 0)} |''')
    print(F'''| 1 | 1 | {nor_gate(1, 1)} |''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the records into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F'''Invalid weight of {weight:f} provided'''
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
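# A small worked example for the scoring pipeline above: weight 0 favours low
# values (price, mileage), weight 1 favours high values (registration year).
vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
print(procentual_proximity(vehicles, [0, 0, 1]))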
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str: _A = """bilinear""" _A = max_size _A = short_edge_length def __call__( self , lowerCAmelCase_ ) -> Optional[Any]: _A = [] for img in imgs: _A , _A = img.shape[:2] # later: provide list and randomly choose index for resize _A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img _A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ ) if h < w: _A , _A = size, scale * w else: _A , _A = scale * h, size if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size: _A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ ) _A = newh * scale _A = neww * scale _A = int(neww + 0.5 ) _A = int(newh + 0.5 ) if img.dtype == np.uinta: _A = Image.fromarray(lowerCAmelCase_ ) _A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) _A = np.asarray(lowerCAmelCase_ ) else: _A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw _A = nn.functional.interpolate( lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 ) img_augs.append(lowerCAmelCase_ ) return img_augs class a : """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> List[Any]: _A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) _A = cfg.INPUT.FORMAT _A = cfg.SIZE_DIVISIBILITY _A = cfg.PAD_VALUE _A = cfg.INPUT.MAX_SIZE_TEST _A = cfg.MODEL.DEVICE _A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) ) _A = [im.shape[-2:] for im in images] _A = [ nn.functional.pad( lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ] return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int: with torch.no_grad(): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = [images] if single_image: assert len(lowerCAmelCase_ ) == 1 for i in range(len(lowerCAmelCase_ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge _A = torch.tensor([im.shape[:2] for im in images] ) _A = self.aug(lowerCAmelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic _A = [self.normalizer(lowerCAmelCase_ ) for x in images] # now pad them to do the following operations _A , _A = self.pad(lowerCAmelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad _A = 
torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]: assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!" _A , _A = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__) tensor[:, 1].clamp_(min=0 , max=snake_case__) tensor[:, 2].clamp_(min=0 , max=snake_case__) tensor[:, 3].clamp_(min=0 , max=snake_case__)
from __future__ import annotations import queue class a : """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> Optional[int]: _A = data _A = None _A = None def snake_case ( ) -> TreeNode: print("""\n********Press N to stop entering at any point of time********\n""") _A = input("""Enter the value of the root node: """).strip().lower() _A = queue.Queue() _A = TreeNode(int(__A)) q.put(__A) while not q.empty(): _A = q.get() _A = F'''Enter the left node of {node_found.data}: ''' _A = input(__A).strip().lower() or '''n''' if check == "n": return tree_node _A = TreeNode(int(__A)) _A = left_node q.put(__A) _A = F'''Enter the right node of {node_found.data}: ''' _A = input(__A).strip().lower() or '''n''' if check == "n": return tree_node _A = TreeNode(int(__A)) _A = right_node q.put(__A) raise def snake_case ( snake_case__ :TreeNode) -> None: if not isinstance(__A , __A) or not node: return print(node.data , end=""",""") pre_order(node.left) pre_order(node.right) def snake_case ( snake_case__ :TreeNode) -> None: if not isinstance(__A , __A) or not node: return in_order(node.left) print(node.data , end=""",""") in_order(node.right) def snake_case ( snake_case__ :TreeNode) -> None: if not isinstance(__A , __A) or not node: return post_order(node.left) post_order(node.right) print(node.data , end=""",""") def snake_case ( snake_case__ :TreeNode) -> None: if not isinstance(__A , __A) or not node: return _A = queue.Queue() q.put(__A) while not q.empty(): _A = q.get() print(node_dequeued.data , end=""",""") if node_dequeued.left: q.put(node_dequeued.left) if node_dequeued.right: q.put(node_dequeued.right) def snake_case ( snake_case__ :TreeNode) -> None: if not isinstance(__A , __A) or not node: return _A = queue.Queue() q.put(__A) while not q.empty(): _A = [] while not q.empty(): _A = q.get() print(node_dequeued.data , end=""",""") if node_dequeued.left: list_.append(node_dequeued.left) if node_dequeued.right: list_.append(node_dequeued.right) print() for node in list_: q.put(__A) def snake_case ( snake_case__ :TreeNode) -> None: if not isinstance(__A , __A) or not node: return _A = [] _A = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""") stack.append(__A) _A = n.left # end of while means current node doesn't have left child _A = stack.pop() # start to traverse its right child _A = n.right def snake_case ( snake_case__ :TreeNode) -> None: if not isinstance(__A , __A) or not node: return _A = [] _A = node while n or stack: while n: stack.append(__A) _A = n.left _A = stack.pop() print(n.data , end=""",""") _A = n.right def snake_case ( snake_case__ :TreeNode) -> None: if not isinstance(__A , __A) or not node: return _A = [], [] _A = node stacka.append(__A) while stacka: # to find the reversed order of post order, store it in stack2 _A = stacka.pop() if n.left: stacka.append(n.left) if n.right: stacka.append(n.right) stacka.append(__A) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""") def snake_case ( snake_case__ :str = "" , snake_case__ :List[Any]=50 , snake_case__ :List[Any]="*") -> str: if not s: return "\n" + width * char _A = divmod(width - len(__A) - 2 , 2) return F'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) _SCREAMING_SNAKE_CASE = build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + 
'\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
from collections import defaultdict


def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # an even-sized subtree hangs below `start`, so the edge above it can be cut
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
def interpolation_search(sorted_collection: list, item: int):
    """Searches `item` in an ascending `sorted_collection`; returns its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    """Recursive variant; call with left=0 and right=len(sorted_collection)-1."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection: list) -> bool:
    if collection != sorted(collection):
        raise ValueError("""Collection must be ascending sorted""")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    # the collection originally lived inside the debug branch, which left it
    # undefined for the search below; it is defined unconditionally here.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(F'''{target} found at positions: {result}''')
    else:
        print('Not found')
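# A quick demonstration of the iterative search above on a sorted list.
data = [10, 30, 40, 45, 50, 66, 77, 93]
print(interpolation_search(data, 66))  # 5
print(interpolation_search(data, 67))  # None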
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_) -> None:
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other) -> bool:
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex) -> None:
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
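# A minimal usage sketch for the Prim implementations above: a weighted triangle
# where the heavy edge (1-3, weight 5) is excluded from the spanning tree.
g = [Vertex(n) for n in range(3)]
connect(g, 1, 2, 1)
connect(g, 2, 3, 1)
connect(g, 1, 3, 5)
print(prim(g[:], g[0]))             # [(2, 1), (3, 2)]
print(list(prim_heap(g[:], g[0])))  # same edges via the heap variant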
import math
import unittest


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            """Zero doesn't have any positive factors, primes must have exactly two.""",
        )
        self.assertFalse(
            is_prime(1),
            """One only has 1 positive factor, primes must have exactly two.""",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}''')
    print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}''')
    print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}''')
    print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}''')
    print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}''')
    print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}''')
    print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}''')
    print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}''')
    print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}''')
    print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}''')


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
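# A hedged usage sketch for the lazy module above, through the public API (the
# call downloads checkpoints, so it is shown commented out):
# from transformers import EncoderDecoderModel
# model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#     "bert-base-uncased", "bert-base-uncased"
# )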
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ]
        )
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple: _A , _A = {}, {} if padding is not None: _A = padding if truncation is not None: _A = truncation if top_k is not None: _A = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]: if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = {"""image""": image, """question""": question} else: _A = image _A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) return results def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any: _A = load_image(inputs["""image"""] ) _A = self.tokenizer( inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ ) _A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCAmelCase_ ) return model_inputs def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = self.model(**lowerCAmelCase_ ) return model_outputs def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]: if top_k > self.model.config.num_labels: _A = self.model.config.num_labels if self.framework == "pt": _A = model_outputs.logits.sigmoid()[0] _A , _A = probs.topk(lowerCAmelCase_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _A = scores.tolist() _A = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
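# A hedged usage sketch for the pipeline above via the public factory; the
# checkpoint name is only an example and triggers a download.
# from transformers import pipeline
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="photo.jpg", question="What is in the picture?", top_k=3)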
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py _SCREAMING_SNAKE_CASE = 'src/transformers' _SCREAMING_SNAKE_CASE = 'docs/source/en/tasks' def snake_case ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Optional[Any]) -> Tuple: '''simple docstring''' with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""") as f: _A = f.readlines() # Find the start prompt. _A = 0 while not lines[start_index].startswith(__UpperCAmelCase): start_index += 1 start_index += 1 _A = start_index while not lines[end_index].startswith(__UpperCAmelCase): end_index += 1 end_index -= 1 while len(lines[start_index]) <= 1: start_index += 1 while len(lines[end_index]) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index]), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. _SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH) _SCREAMING_SNAKE_CASE = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
_SCREAMING_SNAKE_CASE = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def snake_case ( snake_case__ :Optional[int]) -> Optional[int]: '''simple docstring''' _A = TASK_GUIDE_TO_MODELS[task_guide] _A = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCAmelCase , set()) _A = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n" def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Optional[Any]=False) -> Tuple: '''simple docstring''' _A , _A , _A , _A = _find_text_in_file( filename=os.path.join(__UpperCAmelCase , __UpperCAmelCase) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , ) _A = get_model_list_for_task(__UpperCAmelCase) if current_list != new_list: if overwrite: with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase) , """w""" , encoding="""utf-8""" , newline="""\n""") as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:]) else: raise ValueError( F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`''' """ to fix this.""") if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') _SCREAMING_SNAKE_CASE = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
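# Example invocations of the checker above, from the repository root:
# python utils/check_task_guides.py                      # verify the task guides
# python utils/check_task_guides.py --fix_and_overwrite  # regenerate model lists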
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]: _A = {} if train_file is not None: _A = [train_file] if eval_file is not None: _A = [eval_file] if test_file is not None: _A = [test_file] _A = datasets.load_dataset("""csv""" , data_files=snake_case__) _A = list(ds[list(files.keys())[0]].features.keys()) _A = features_name.pop(snake_case__) _A = list(set(ds[list(files.keys())[0]][label_name])) _A = {label: i for i, label in enumerate(snake_case__)} _A = tokenizer.model_input_names _A = {} if len(snake_case__) == 1: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , ) elif len(snake_case__) == 2: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST]))) return train_ds, val_ds, test_ds, labelaid _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class a : """simple docstring""" lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} ) lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training 
file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} ) lowerCamelCase :int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCamelCase :bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def snake_case ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) _A , _A , _A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' """ --overwrite_output_dir to overcome.""") # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info( F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, ''' F'''16-bits training: {training_args.fpaa}''') logger.info(F'''Training/evaluation parameters {training_args}''') # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A , _A , _A , _A = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _A = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , ) def compute_metrics(snake_case__ :EvalPrediction) -> Dict: _A = np.argmax(p.predictions , axis=1) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _A = TFTrainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir) # Evaluation _A = {} if training_args.do_eval: logger.info("""*** Evaluate ***""") _A = trainer.evaluate() _A = os.path.join(training_args.output_dir , """eval_results.txt""") with open(snake_case__ , """w""") as writer: logger.info("""***** Eval results *****""") for key, value in result.items(): logger.info(F''' {key} = {value}''') writer.write(F'''{key} = {value}\n''') results.update(snake_case__) return results if __name__ == "__main__": main()
83
0
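# A minimal, de-obfuscated sketch of the tf.data pattern used in the
# text-classification script above: a Python generator yielding
# (features, label) pairs is wrapped into a tf.data.Dataset via
# from_generator with explicit output types and shapes. All names and
# values here are illustrative, not taken from the original script.
import tensorflow as tf

def gen_examples():
    # Toy stand-in for the tokenized dataset rows in the script.
    for input_ids, label in [([1, 2, 3], 0), ([4, 5], 1)]:
        yield {"input_ids": input_ids}, label

ds = tf.data.Dataset.from_generator(
    gen_examples,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)

for features, label in ds:
    print(features["input_ids"].numpy(), int(label))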
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt


if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--original_config_file',
        type=str,
        required=True,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--image_size',
        default=512,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')

    def snake_case ( snake_case__ :str) -> Any:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'''could not parse string as bool {string}''')

    parser.add_argument(
        '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
    )
    parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)

    _SCREAMING_SNAKE_CASE = parser.parse_args()

    _SCREAMING_SNAKE_CASE = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
714
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Union[str, Any] = '''speech_to_text''' lowerCamelCase :List[str] = ['''past_key_values'''] lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple: _A = vocab_size _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = max_source_positions _A = max_target_positions _A = num_conv_layers _A = list(lowerCAmelCase_ ) _A = conv_channels _A = input_feat_per_channel _A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
83
0
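# The converter record above mixes `action='store_true'` flags with a
# custom `type=` callable so that optional overrides can stay None when
# the flag is omitted. A minimal stdlib-only sketch of that tri-state
# pattern (None / True / False); argument names are illustrative.
import argparse

def parse_bool(string: str) -> bool:
    if string == "True":
        return True
    if string == "False":
        return False
    raise argparse.ArgumentTypeError(f"could not parse string as bool {string}")

parser = argparse.ArgumentParser()
parser.add_argument("--use_linear_projection", type=parse_bool, default=None)
args = parser.parse_args(["--use_linear_projection", "False"])
print(args.use_linear_projection)  # False, distinct from the None default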
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { "configuration_clap": [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", ], "processing_clap": ["ClapProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ] _SCREAMING_SNAKE_CASE = ["ClapFeatureExtractor"] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
715
from __future__ import annotations
from collections.abc import Callable


def snake_case ( snake_case__ :Callable[[int | float], int | float] , snake_case__ :int | float , snake_case__ :int | float , snake_case__ :int = 100 , ) -> float:
    _A = x_start
    _A = fnc(snake_case__)
    _A = 0.0
    for _ in range(snake_case__):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        _A = (x_end - x_start) / steps + xa
        _A = fnc(snake_case__)
        area += abs(fxa + fxa) * (xa - xa) / 2
        # Increment step
        _A = xa
        _A = fxa
    return area


if __name__ == "__main__":

    def snake_case ( snake_case__ :Tuple) -> List[str]:
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    _SCREAMING_SNAKE_CASE = 10
    while i <= 100_000:
        print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
        i *= 10
83
0
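# The obfuscation in the trapezoidal-rule record above collapsed the
# x1/x2 and f(x1)/f(x2) pairs into single names, so its area update reads
# as zero. A de-obfuscated reconstruction of the intended algorithm, with
# illustrative names:
from collections.abc import Callable

def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    step_size = (x_end - x_start) / steps
    for _ in range(steps):
        # Approximate each small segment of the curve as a trapezoid.
        x2 = x1 + step_size
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        x1, fx1 = x2, fx2
    return area

print(trapezoidal_area(lambda x: x**3 + x**2, -5, 5, 10_000))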
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { 'configuration_bigbird_pegasus': [ 'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BigBirdPegasusConfig', 'BigBirdPegasusOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST', 'BigBirdPegasusForCausalLM', 'BigBirdPegasusForConditionalGeneration', 'BigBirdPegasusForQuestionAnswering', 'BigBirdPegasusForSequenceClassification', 'BigBirdPegasusModel', 'BigBirdPegasusPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
716
import numpy as np import qiskit def snake_case ( snake_case__ :int = 8 , snake_case__ :int | None = None) -> str: _A = np.random.default_rng(seed=snake_case__) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. _A = 6 * key_len # Measurement basis for Alice's qubits. _A = rng.integers(2 , size=snake_case__) # The set of states Alice will prepare. _A = rng.integers(2 , size=snake_case__) # Measurement basis for Bob's qubits. _A = rng.integers(2 , size=snake_case__) # Quantum Circuit to simulate BB84 _A = qiskit.QuantumCircuit(snake_case__ , name="""BB84""") # Alice prepares her qubits according to rules above. for index, _ in enumerate(snake_case__): if alice_state[index] == 1: bbaa_circ.x(snake_case__) if alice_basis[index] == 1: bbaa_circ.h(snake_case__) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(snake_case__): if bob_basis[index] == 1: bbaa_circ.h(snake_case__) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. _A = qiskit.Aer.get_backend("""aer_simulator""") # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. _A = qiskit.execute(snake_case__ , snake_case__ , shots=1 , seed_simulator=snake_case__) # Returns the result of measurement. _A = job.result().get_counts(snake_case__).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. _A = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( snake_case__ , snake_case__ , snake_case__) if alice_basis_bit == bob_basis_bit ]) # Get final key. Pad with 0 if too short, otherwise truncate. _A = gen_key[:key_len] if len(snake_case__) >= key_len else gen_key.ljust(snake_case__ , """0""") return key if __name__ == "__main__": print(F'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
83
0
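# The core classical step of the BB84 record above is basis sifting:
# only measurement results where Alice's and Bob's bases agree are kept.
# A qiskit-free sketch of that sifting logic with numpy; variable names
# and sizes are illustrative.
import numpy as np

rng = np.random.default_rng(seed=0)
num_qubits = 48
alice_basis = rng.integers(2, size=num_qubits)
alice_state = rng.integers(2, size=num_qubits)
bob_basis = rng.integers(2, size=num_qubits)

# With ideal qubits and no eavesdropper, Bob's result equals Alice's
# prepared state whenever the bases match; mismatches are discarded.
key = "".join(
    str(bit)
    for a_basis, b_basis, bit in zip(alice_basis, bob_basis, alice_state)
    if a_basis == b_basis
)
print(key[:8].ljust(8, "0"))  # pad with 0 if too short, as in the script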
'''simple docstring''' import numpy as np class a : """simple docstring""" def __init__( self ) -> Optional[Any]: _A = (0, 0) _A = None _A = 0 _A = 0 _A = 0 def __eq__( self , lowerCAmelCase_ ) -> List[str]: return self.position == cell.position def UpperCAmelCase ( self ) -> Optional[Any]: print(self.position ) class a : """simple docstring""" def __init__( self , lowerCAmelCase_=(5, 5) ) -> int: _A = np.zeros(__lowerCamelCase ) _A = world_size[0] _A = world_size[1] def UpperCAmelCase ( self ) -> Tuple: print(self.w ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _A = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] _A = cell.position[0] _A = cell.position[1] _A = [] for n in neughbour_cord: _A = current_x + n[0] _A = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: _A = Cell() _A = (x, y) _A = cell neighbours.append(__lowerCamelCase ) return neighbours def snake_case ( snake_case__ :List[str] , snake_case__ :List[str] , snake_case__ :Tuple) -> str: _A = [] _A = [] _open.append(_lowerCamelCase) while _open: _A = np.argmin([n.f for n in _open]) _A = _open[min_f] _closed.append(_open.pop(_lowerCamelCase)) if current == goal: break for n in world.get_neigbours(_lowerCamelCase): for c in _closed: if c == n: continue _A = current.g + 1 _A = n.position _A = goal.position _A = (ya - ya) ** 2 + (xa - xa) ** 2 _A = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(_lowerCamelCase) _A = [] while current.parent is not None: path.append(current.position) _A = current.parent path.append(current.position) return path[::-1] if __name__ == "__main__": _SCREAMING_SNAKE_CASE = Gridworld() # Start position and goal _SCREAMING_SNAKE_CASE = Cell() _SCREAMING_SNAKE_CASE = (0, 0) _SCREAMING_SNAKE_CASE = Cell() _SCREAMING_SNAKE_CASE = (4, 4) print(F'''path from {start.position} to {goal.position}''') _SCREAMING_SNAKE_CASE = astar(world, start, goal) # Just for visual reasons. for i in s: _SCREAMING_SNAKE_CASE = 1 print(world.w)
717
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def snake_case ( snake_case__ :int) -> Optional[int]: return EnvironmentCommand() def snake_case ( snake_case__ :Tuple) -> List[str]: return EnvironmentCommand(args.accelerate_config_file) class a ( __lowerCAmelCase ): """simple docstring""" @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple: _A = parser.add_parser("""env""" ) download_parser.set_defaults(func=lowerCAmelCase_ ) download_parser.add_argument( """--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , ) download_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None: _A = accelerate_config_file def UpperCAmelCase ( self ) -> Dict: _A = """not installed""" if is_safetensors_available(): import safetensors _A = safetensors.__version__ elif importlib.util.find_spec("""safetensors""" ) is not None: import safetensors _A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' _A = """not installed""" _A = _A = """not found""" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file _A = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ): _A = load_config_from_file(self._accelerate_config_file ).to_dict() _A = ( """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else F'''\t{accelerate_config}''' ) _A = """not installed""" _A = """NA""" if is_torch_available(): import torch _A = torch.__version__ _A = torch.cuda.is_available() _A = """not installed""" _A = """NA""" if is_tf_available(): import tensorflow as tf _A = tf.__version__ try: # deprecated in v2.1 _A = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool _A = bool(tf.config.list_physical_devices("""GPU""" ) ) _A = """not installed""" _A = """not installed""" _A = """not installed""" _A = """NA""" if is_flax_available(): import flax import jax import jaxlib _A = flax.__version__ _A = jax.__version__ _A = jaxlib.__version__ _A = jax.lib.xla_bridge.get_backend().platform _A = { """`transformers` version""": version, """Platform""": platform.platform(), """Python version""": platform.python_version(), """Huggingface_hub version""": huggingface_hub.__version__, """Safetensors version""": F'''{safetensors_version}''', """Accelerate version""": F'''{accelerate_version}''', """Accelerate config""": F'''{accelerate_config_str}''', """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''', """Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''', """Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''', """Jax version""": F'''{jax_version}''', """JaxLib version""": F'''{jaxlib_version}''', """Using GPU in script?""": """<fill in>""", """Using distributed or parallel set-up in script?""": """<fill in>""", } print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" ) print(self.format_dict(lowerCAmelCase_ ) ) 
return info @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple: return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
83
0
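# The A* record above lost its distinct coordinate names to the
# obfuscation (its heuristic reads (ya - ya) ** 2 + (xa - xa) ** 2,
# which is always 0). A de-obfuscated sketch of the intended cost update
# for one neighbour; the fields g, h, f and parent mirror the Cell class
# above, everything else is illustrative.
from dataclasses import dataclass

@dataclass
class Cell:
    position: tuple[int, int] = (0, 0)
    g: float = 0.0
    h: float = 0.0
    f: float = 0.0
    parent: "Cell | None" = None

def update_neighbour(n: Cell, current: Cell, goal: Cell) -> None:
    n.g = current.g + 1
    x1, y1 = n.position
    x2, y2 = goal.position
    # Squared Euclidean distance to the goal as the heuristic.
    n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
    n.f = n.g + n.h
    n.parent = current

start, goal, n = Cell((0, 0)), Cell((4, 4)), Cell((1, 1))
update_neighbour(n, start, goal)
print(n.g, n.h, n.f)  # 1 18 19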
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=10 , lowerCAmelCase_=3 , lowerCAmelCase_=32 * 4 , lowerCAmelCase_=32 * 6 , lowerCAmelCase_=4 , lowerCAmelCase_=32 , ) -> Optional[int]: _A = parent _A = batch_size _A = is_training _A = use_auxiliary_loss _A = num_queries _A = num_channels _A = min_size _A = max_size _A = num_labels _A = mask_feature_size def UpperCAmelCase ( self ) -> Any: _A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase_ ) _A = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase_ ) _A = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase_ ) > 0.5 ).float() _A = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase_ ) > 0.5).long() _A = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase ( self ) -> str: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def UpperCAmelCase ( self ) -> Dict: _A = self.prepare_config_and_inputs() _A = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: _A = output.encoder_hidden_states _A = output.pixel_decoder_hidden_states _A = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase_ ) , config.decoder_config.decoder_layers ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> str: with torch.no_grad(): _A = MaskFormerModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _A = model(pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ ) _A = model(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists 
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _A = MaskFormerForInstanceSegmentation(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() def comm_check_on_output(lowerCAmelCase_ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _A = model(pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ ) _A = model(lowerCamelCase_ ) comm_check_on_output(lowerCamelCase_ ) _A = model( pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ ) comm_check_on_output(lowerCamelCase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class a ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase :Tuple = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowerCamelCase :Tuple = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowerCamelCase :Tuple = False lowerCamelCase :Union[str, Any] = False lowerCamelCase :Union[str, Any] = False lowerCamelCase :Optional[int] = False def UpperCAmelCase ( self ) -> int: _A = MaskFormerModelTester(self ) _A = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def UpperCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowerCamelCase_ , **lowerCamelCase_ , output_hidden_states=lowerCamelCase_ ) def UpperCAmelCase ( self ) -> str: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase_ ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def UpperCAmelCase ( self ) -> str: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def UpperCAmelCase ( self ) -> Tuple: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def UpperCAmelCase ( self ) -> List[Any]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" ) def UpperCAmelCase ( self ) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the 
model used for common tests.""" ) def UpperCAmelCase ( self ) -> Union[str, Any]: pass def UpperCAmelCase ( self ) -> str: _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(lowerCamelCase_ ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) @slow def UpperCAmelCase ( self ) -> Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _A = MaskFormerModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def UpperCAmelCase ( self ) -> Tuple: _A = (self.model_tester.min_size,) * 2 _A = { '''pixel_values''': torch.randn((2, 3, *size) , device=lowerCamelCase_ ), '''mask_labels''': torch.randn((2, 10, *size) , device=lowerCamelCase_ ), '''class_labels''': torch.zeros(2 , 10 , device=lowerCamelCase_ ).long(), } _A = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase_ ) _A = model(**lowerCamelCase_ ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase ( self ) -> Optional[Any]: _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowerCamelCase_ , **lowerCamelCase_ , output_hidden_states=lowerCamelCase_ ) def UpperCAmelCase ( self ) -> Any: _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(lowerCamelCase_ ).to(lowerCamelCase_ ) _A = model(**lowerCamelCase_ , output_attentions=lowerCamelCase_ ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase ( self ) -> Dict: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _A = self.all_model_classes[1] _A = self.model_tester.prepare_config_and_inputs() _A = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.train() _A = model(lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ ).loss loss.backward() def UpperCAmelCase ( self ) -> int: _A = self.all_model_classes[1] _A = self.model_tester.prepare_config_and_inputs() _A = True _A = True _A = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.train() _A = model(lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ ) _A = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _A = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _A = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _A = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _SCREAMING_SNAKE_CASE = 1e-4 def snake_case ( ) -> Tuple: _A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") return image @require_vision @slow class a ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase ( self ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def UpperCAmelCase ( self ) -> str: _A = 
MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(lowerCamelCase_ ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase_ , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**lowerCamelCase_ ) _A = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(lowerCamelCase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) _A = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(lowerCamelCase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) _A = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(lowerCamelCase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) def UpperCAmelCase ( self ) -> Any: _A = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(lowerCamelCase_ ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase_ , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**lowerCamelCase_ ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [ [-1.373_7124, -1.772_4937, -1.936_4233], [-1.597_7281, -1.986_7939, -2.152_3695], [-1.579_5398, -1.926_9832, -2.09_3942], ] _A = torch.tensor(lowerCamelCase_ ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) def UpperCAmelCase ( self ) -> str: _A = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(lowerCamelCase_ ) .eval() ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) _A = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase_ , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _A = model(**lowerCamelCase_ ) # masks_queries_logits _A = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, 
inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _A = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.77_11]] _A = torch.tensor(lowerCamelCase_ ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) # class_queries_logits _A = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _A = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) def UpperCAmelCase ( self ) -> Tuple: _A = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(lowerCamelCase_ ) .eval() ) _A = self.default_image_processor _A = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , ) _A = inputs['''pixel_values'''].to(lowerCamelCase_ ) _A = [el.to(lowerCamelCase_ ) for el in inputs['''mask_labels''']] _A = [el.to(lowerCamelCase_ ) for el in inputs['''class_labels''']] with torch.no_grad(): _A = model(**lowerCamelCase_ ) self.assertTrue(outputs.loss is not None )
718
import colorsys from PIL import Image # type: ignore def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float: _A = x _A = y for step in range(snake_case__): # noqa: B007 _A = a * a - b * b + x _A = 2 * a * b + y _A = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def snake_case ( snake_case__ :float) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def snake_case ( snake_case__ :float) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1)) def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image: _A = Image.new("""RGB""" , (image_width, image_height)) _A = img.load() # loop through the image-coordinates for image_x in range(snake_case__): for image_y in range(snake_case__): # determine the figure-coordinates based on the image-coordinates _A = figure_width / image_width * image_height _A = figure_center_x + (image_x / image_width - 0.5) * figure_width _A = figure_center_y + (image_y / image_height - 0.5) * figure_height _A = get_distance(snake_case__ , snake_case__ , snake_case__) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: _A = get_color_coded_rgb(snake_case__) else: _A = get_black_and_white_rgb(snake_case__) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _SCREAMING_SNAKE_CASE = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
83
0
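# The pixel loop in the Mandelbrot record above implements the classic
# escape-time iteration z -> z**2 + c with a bailout radius of 2
# (checked there as a * a + b * b > 4). A compact sketch of that core
# using Python complex numbers; names are illustrative.
def escape_time(c: complex, max_step: int = 50) -> float:
    z = c
    for step in range(max_step):
        z = z * z + c
        if abs(z) > 2:  # diverges once |z| exceeds the bailout radius
            break
    return step / (max_step - 1)

print(escape_time(complex(-0.6, 0.0)))  # stays bounded -> 1.0
print(escape_time(complex(1.0, 1.0)))   # escapes immediately -> 0.0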
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
719
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' _SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' _SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]: _A = {doc: key_lines} _A = {doc: sys_lines} _A = {} _A = 0 _A = 0 _A = 0 _A = 0 _A = 0 _A = 0 _A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__) key_singletons_num += singletons_num if NP_only or min_span: _A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__) _A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__) sys_singletons_num += singletons_num if NP_only or min_span: _A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__) if remove_nested: _A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _A = reader.get_mention_assignments(snake_case__ , snake_case__) _A = reader.get_mention_assignments(snake_case__ , snake_case__) _A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''') logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''') if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' """files, respectively""") return doc_coref_infos def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int: _A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , 
snake_case__ , snake_case__) _A = {} _A = 0 _A = 0 for name, metric in metrics: _A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa}) logger.info( name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _A = (conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''') output_scores.update({"""conll_score""": conll}) return output_scores def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]: _A = False for line in key_lines: if not line.startswith("""#"""): if len(line.split()) > 6: _A = line.split()[5] if not parse_col == "-": _A = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): """simple docstring""" def UpperCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]: _A = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _A = util.check_gold_parse_annotation(lowerCAmelCase_ ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _A = evaluate( key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , ) return score
83
0
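# Several records above (CLAP, BigBird-Pegasus, RoFormer) end with the
# same transformers `_LazyModule` idiom: heavy submodules are imported
# only on first attribute access. `_LazyModule` itself is internal to
# transformers; a simplified stand-in using module-level __getattr__
# (PEP 562), as it might appear in a package's __init__.py. The package
# and submodule names below are hypothetical.
import importlib

_import_structure = {"modeling": ["MyModel"], "configuration": ["MyConfig"]}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    # Import the owning submodule lazily and pull the attribute from it.
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")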
from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name _SCREAMING_SNAKE_CASE = """ Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior.to(\"cuda\") >>> prompt = \"A red cartoon frog, 4k\" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16 ... ) >>> pipe.to(\"cuda\") >>> init_image = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/frog.png\" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save(\"red_frog.png\") ``` """ def snake_case ( snake_case__ :List[str] , snake_case__ :List[Any] , snake_case__ :Optional[Any]=8) -> Optional[int]: _A = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _A = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def snake_case ( snake_case__ :int , snake_case__ :int=512 , snake_case__ :str=512) -> Tuple: _A = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1) _A = np.array(pil_image.convert("""RGB""")) _A = arr.astype(np.floataa) / 127.5 - 1 _A = np.transpose(snake_case__ , [2, 0, 1]) _A = torch.from_numpy(snake_case__).unsqueeze(0) return image class a ( _a ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Union[str, Any]: super().__init__() self.register_modules( unet=snake_case_ , scheduler=snake_case_ , movq=snake_case_ , ) _A = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: # get the original timestep using init_timestep _A = min(int(num_inference_steps * strength ) , snake_case_ ) _A = max(num_inference_steps - init_timestep , 0 ) _A = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> str: if not isinstance(snake_case_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case_ )}''' ) _A = image.to(device=snake_case_ , dtype=snake_case_ ) _A = batch_size * num_images_per_prompt if image.shape[1] == 4: _A = image else: if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(snake_case_ )}, but 
requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) elif isinstance(snake_case_ , snake_case_ ): _A = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case_ ) ] _A = torch.cat(snake_case_ , dim=0 ) else: _A = self.movq.encode(snake_case_ ).latent_dist.sample(snake_case_ ) _A = self.movq.config.scaling_factor * init_latents _A = torch.cat([init_latents] , dim=0 ) _A = init_latents.shape _A = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ ) # get latents _A = self.scheduler.add_noise(snake_case_ , snake_case_ , snake_case_ ) _A = init_latents return latents def UpperCAmelCase ( self , lowerCAmelCase_=0 ) -> Tuple: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _A = torch.device(F'''cuda:{gpu_id}''' ) _A = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(snake_case_ , snake_case_ ) def UpperCAmelCase ( self , lowerCAmelCase_=0 ) -> List[str]: if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) _A = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=snake_case_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _A = None for cpu_offloaded_model in [self.unet, self.movq]: _A , _A = cpu_offload_with_hook(snake_case_ , snake_case_ , prev_module_hook=snake_case_ ) # We'll offload the last model manually. _A = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase ( self ) -> Optional[int]: if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(snake_case_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(snake_case_ ) def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 5_12 , lowerCAmelCase_ = 5_12 , lowerCAmelCase_ = 1_00 , lowerCAmelCase_ = 4.0 , lowerCAmelCase_ = 0.3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ) -> List[str]: _A = self._execution_device _A = guidance_scale > 1.0 if isinstance(snake_case_ , snake_case_ ): _A = torch.cat(snake_case_ , dim=0 ) _A = image_embeds.shape[0] if isinstance(snake_case_ , snake_case_ ): _A = torch.cat(snake_case_ , dim=0 ) if do_classifier_free_guidance: _A = image_embeds.repeat_interleave(snake_case_ , dim=0 ) _A = negative_image_embeds.repeat_interleave(snake_case_ , dim=0 ) _A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case_ ) if not isinstance(snake_case_ , snake_case_ ): _A = [image] if not all(isinstance(snake_case_ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F'''Input is in incorrect format: {[type(snake_case_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor''' ) _A = torch.cat([prepare_image(snake_case_ , snake_case_ , snake_case_ ) for i in image] , dim=0 ) _A = image.to(dtype=image_embeds.dtype , device=snake_case_ ) _A = self.movq.encode(snake_case_ )["""latents"""] _A = latents.repeat_interleave(snake_case_ , dim=0 ) self.scheduler.set_timesteps(snake_case_ , device=snake_case_ ) _A , _A = self.get_timesteps(snake_case_ , snake_case_ , snake_case_ ) _A = timesteps[:1].repeat(batch_size * num_images_per_prompt ) _A , _A = downscale_height_and_width(snake_case_ , snake_case_ , self.movq_scale_factor ) _A = self.prepare_latents( snake_case_ , snake_case_ , snake_case_ , snake_case_ , image_embeds.dtype , snake_case_ , snake_case_ ) for i, t in enumerate(self.progress_bar(snake_case_ ) ): # expand the latents if we are doing classifier free guidance _A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _A = {"""image_embeds""": image_embeds} _A = self.unet( sample=snake_case_ , timestep=snake_case_ , encoder_hidden_states=snake_case_ , added_cond_kwargs=snake_case_ , return_dict=snake_case_ , )[0] if do_classifier_free_guidance: _A , _A = noise_pred.split(latents.shape[1] , dim=1 ) _A , _A = noise_pred.chunk(2 ) _A , _A = variance_pred.chunk(2 ) _A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _A = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _A , _A = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step( snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ , )[0] # post-processing _A = self.movq.decode(snake_case_ , force_not_quantize=snake_case_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: _A = image * 0.5 + 0.5 _A = image.clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _A = self.numpy_to_pil(snake_case_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case_ )
720
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } _SCREAMING_SNAKE_CASE = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } _SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512} def snake_case ( snake_case__ :Tuple) -> str: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char)) _A = char _A = set(snake_case__) return pairs class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :List[Any] = VOCAB_FILES_NAMES lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase :int = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int: super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ ) with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle: _A = json.load(lowerCAmelCase_ ) _A = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: _A = merges_handle.read().split("""\n""" )[1:-1] _A = [tuple(merge.split() ) for merge in merges] _A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _A = {} @property def UpperCAmelCase ( self ) -> int: return len(self.encoder ) def UpperCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: if token in self.cache: return self.cache[token] _A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ ) _A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ ) _A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ ) if "\n" in token: _A = token.replace("""\n""" , """ __newln__""" ) _A = token.split(""" """ ) _A = [] for token in tokens: if not len(lowerCAmelCase_ ): continue _A = token.lower() _A = tuple(lowerCAmelCase_ ) _A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) _A = get_pairs(lowerCAmelCase_ ) if not pairs: words.append(lowerCAmelCase_ ) continue while True: _A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(lowerCAmelCase_ ): try: _A = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) new_word.extend(word[i:j] ) _A = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(lowerCAmelCase_ ) _A = new_word if len(lowerCAmelCase_ ) == 1: break else: _A = get_pairs(lowerCAmelCase_ ) _A = """@@ """.join(lowerCAmelCase_ ) _A = 
word[:-4] _A = word words.append(lowerCAmelCase_ ) return " ".join(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _A = [] _A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: _A = token.lower() return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: _A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" ) _A = 0 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) _A = token_index writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file
83
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
721
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-",
    "!": "-.-.--",  # Exclamation mark is not in ITU-R recommendation
    " ": "/",
}

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
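A quick round-trip check of the table above (a minimal sketch, assuming the `encrypt`/`decrypt` names used in the cleaned-up file):

# Encoding then decoding recovers the uppercased text; spaces survive via '/'.
assert encrypt("SOS") == "... --- ..."
assert decrypt("... --- ...") == "SOS"
assert decrypt(encrypt("HELLO WORLD")) == "HELLO WORLD"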
83
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } _SCREAMING_SNAKE_CASE = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def snake_case ( snake_case__ :List[Any]) -> Union[str, Any]: _A = {} with open(snake_case__ , """r""") as file: for line_number, line in enumerate(snake_case__): _A = line.strip() if line: _A = line.split() _A = line_number _A = words[0] _A = value return result def snake_case ( snake_case__ :int , snake_case__ :List[str] , snake_case__ :List[Any] , snake_case__ :List[Any] , snake_case__ :Dict) -> List[Any]: for attribute in key.split("""."""): _A = getattr(snake_case__ , snake_case__) _A = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case__): _A = PARAM_MAPPING[full_name.split(""".""")[-1]] _A = """param""" if weight_type is not None and weight_type != "param": _A = getattr(snake_case__ , snake_case__).shape elif weight_type is not None and weight_type == "param": _A = hf_pointer for attribute in hf_param_name.split("""."""): _A = getattr(snake_case__ , snake_case__) _A = shape_pointer.shape # let's reduce dimension _A = value[0] else: _A = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''') if weight_type == "weight": _A = value elif weight_type == "weight_g": _A = value elif weight_type == "weight_v": _A = value elif weight_type == "bias": _A = value elif weight_type == "param": for attribute in hf_param_name.split("""."""): _A = getattr(snake_case__ , snake_case__) _A = value else: _A = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''') def snake_case ( snake_case__ :Optional[int] , snake_case__ :Dict , snake_case__ :Optional[Any] , snake_case__ :Optional[Any] , snake_case__ :List[str]) -> int: _A = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case__): _A = PARAM_MAPPING[full_name.split(""".""")[-1]] _A = """param""" if weight_type is not None and weight_type != "param": _A = """.""".join([key, weight_type]) elif weight_type is not None and weight_type == "param": _A = """.""".join([key, hf_param_name]) else: _A = key _A = value if """lm_head""" in full_key else value[0] _SCREAMING_SNAKE_CASE = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def snake_case ( snake_case__ :List[str] , snake_case__ :Any , snake_case__ :List[Any]=None , snake_case__ :int=None) -> Optional[int]: _A = False for key, mapped_key in MAPPING.items(): _A = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]: _A = True if "*" in mapped_key: _A = name.split(snake_case__)[0].split(""".""")[-2] _A = mapped_key.replace("""*""" , snake_case__) if "weight_g" in name: _A = """weight_g""" elif "weight_v" in name: _A = """weight_v""" elif "bias" in name: _A = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj _A = """weight""" else: _A = None if hf_dict is not None: rename_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__) else: set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__) return is_used return is_used def snake_case ( snake_case__ :List[str] , snake_case__ :Dict , snake_case__ :int) -> List[Any]: _A = [] _A = fairseq_model.state_dict() _A = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): _A = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , ) _A = True else: _A = load_wavaveca_layer(snake_case__ , snake_case__ , snake_case__) if not is_used: unused_weights.append(snake_case__) logger.warning(F'''Unused weights: {unused_weights}''') def snake_case ( snake_case__ :Tuple , snake_case__ :Optional[int] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str]) -> List[Any]: _A = full_name.split("""conv_layers.""")[-1] _A = name.split(""".""") _A = int(items[0]) _A = int(items[1]) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''') _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''') _A = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( 
F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''') _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''') _A = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') else: unused_weights.append(snake_case__) @torch.no_grad() def snake_case ( snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str]=None , snake_case__ :Tuple=None , snake_case__ :Tuple=True , snake_case__ :Optional[Any]=False) -> Any: if config_path is not None: _A = WavaVecaConfig.from_pretrained(snake_case__) else: _A = WavaVecaConfig() if is_seq_class: _A = read_txt_into_dict(snake_case__) _A = idalabel _A = WavaVecaForSequenceClassification(snake_case__) _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) feature_extractor.save_pretrained(snake_case__) elif is_finetuned: if dict_path: _A = Dictionary.load(snake_case__) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _A = target_dict.pad_index _A = target_dict.bos_index _A = target_dict.eos_index _A = len(target_dict.symbols) _A = os.path.join(snake_case__ , """vocab.json""") if not os.path.isdir(snake_case__): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(snake_case__)) return os.makedirs(snake_case__ , exist_ok=snake_case__) _A = target_dict.indices # fairseq has the <pad> and <s> switched _A = 0 _A = 1 with open(snake_case__ , """w""" , encoding="""utf-8""") as vocab_handle: json.dump(snake_case__ , snake_case__) _A = WavaVecaCTCTokenizer( snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=snake_case__ , ) _A = True if config.feat_extract_norm == """layer""" else False _A = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) _A = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__) processor.save_pretrained(snake_case__) _A = WavaVecaForCTC(snake_case__) else: _A = WavaVecaForPreTraining(snake_case__) if is_finetuned or is_seq_class: _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])}) else: _A = argparse.Namespace(task="""audio_pretraining""") _A = fairseq.tasks.setup_task(snake_case__) _A , _A , _A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case__) _A = model[0].eval() recursively_load_weights(snake_case__ , snake_case__ , not is_finetuned) hf_wavavec.save_pretrained(snake_case__) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', 
default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) _SCREAMING_SNAKE_CASE = parser.parse_args() _SCREAMING_SNAKE_CASE = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
700
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
83
0
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class a ( ABC ):
    """Abstract base class for CLI subcommands."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
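A minimal sketch of a concrete subcommand wired into argparse (the `HelloCommand` name and the `factory` default are illustrative assumptions, not part of the original file):

from argparse import ArgumentParser


class HelloCommand(a):
    @staticmethod
    def register_subcommand(subparsers):
        # Attach a "hello" sub-parser and remember how to build the command.
        sub = subparsers.add_parser("hello")
        sub.set_defaults(factory=lambda args: HelloCommand())

    def run(self):
        print("hello")


parser = ArgumentParser()
HelloCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["hello"])
args.factory(args).run()  # prints "hello"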
701
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class a ( PipelineTool ):
    """Summarization tool: tokenizes the input text, generates with a seq2seq model, decodes the result."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
83
0
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
702
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _SCREAMING_SNAKE_CASE = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def snake_case ( snake_case__ :Union[str, Any]) -> Dict: _A = torch.load(snake_case__ , map_location="""cpu""") return sd def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]: _A = OrderedDict() _A = torch.arange(config.max_position_embeddings).expand((1, -1)) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _A = key for name_pair in rename_keys_prefix: _A = new_key.replace(name_pair[0] , name_pair[1]) _A = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _A = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int: assert ( checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: _A = """pretraining""" if "vcr" in checkpoint_path: _A = {"""visual_embedding_dim""": 512} elif "vqa_advanced" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} elif "vqa" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} elif "nlvr" in checkpoint_path: _A = {"""visual_embedding_dim""": 1_024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''') else: if "vcr" in checkpoint_path: _A = {"""visual_embedding_dim""": 512} _A = """multichoice""" elif "vqa_advanced" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} _A = """vqa_advanced""" elif "vqa" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129} _A = """vqa""" elif "nlvr" in checkpoint_path: _A = { """visual_embedding_dim""": 1_024, """num_labels""": 2, } _A = """nlvr""" _A = VisualBertConfig(**snake_case__) # Load State Dict _A = load_state_dict(snake_case__) _A = get_new_dict(snake_case__ , snake_case__) if model_type == "pretraining": _A = VisualBertForPreTraining(snake_case__) elif model_type == "vqa": _A = VisualBertForQuestionAnswering(snake_case__) elif model_type == "nlvr": _A = VisualBertForVisualReasoning(snake_case__) elif model_type == "multichoice": _A = VisualBertForMultipleChoice(snake_case__) model.load_state_dict(snake_case__) # Save Checkpoints Path(snake_case__).mkdir(exist_ok=snake_case__) model.save_pretrained(snake_case__) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', 
type=str, help='Path to the output PyTorch model.') _SCREAMING_SNAKE_CASE = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
83
0
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    # In camelCase the first word keeps its case; in PascalCase every word is capitalized.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
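Example behaviour (using the `snake_to_camel_case` name chosen in the fix above):

print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString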
703
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class a ( TestCase ):
    """Tests for Dataset.from_list."""

    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
83
0
def solution(n: int = 4_000_000) -> int:
    # Build Fibonacci numbers up to the limit, then sum the even-valued terms.
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
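Two quick spot checks (the even Fibonacci terms up to the limit are summed):

assert solution(10) == 2 + 8        # = 10
assert solution(60) == 2 + 8 + 34   # = 44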
704
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, with 2 added explicitly.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over primes p | n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
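The totient sum over 2..limit counts the reduced proper fractions with denominator at most `limit` (Project Euler 72); for a limit of 8 there are 21 of them:

assert solution(8) == 21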
83
0
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
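Both qubits are deterministically flipped by the X gates before measurement, so every one of the 1,000 shots should land in the '11' state; the expected print-out is:

# Total count for various states are: {'11': 1000}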
705
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = relative_attention _A = position_biased_input _A = pos_att_type _A = scope def UpperCAmelCase ( self ) -> Dict: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self ) -> Optional[int]: return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _A = DebertaVaModel(config=lowerCAmelCase_ ) 
model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _A = DebertaVaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _A = self.num_labels _A = DebertaVaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = self.num_labels _A = DebertaVaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: _A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( __lowerCAmelCase , __lowerCAmelCase 
, unittest.TestCase ): """simple docstring""" lowerCamelCase :int = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase :str = ( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase :str = True lowerCamelCase :Union[str, Any] = False lowerCamelCase :Optional[int] = False lowerCamelCase :List[str] = False lowerCamelCase :str = False def UpperCAmelCase ( self ) -> Optional[int]: _A = DebertaVaModelTester(self ) _A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> List[str]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> int: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ ) @slow def UpperCAmelCase ( self ) -> Any: for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DebertaVaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase ( self ) -> int: pass @slow def UpperCAmelCase ( self ) -> Optional[Any]: _A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" ) _A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) _A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. _A = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
83
0
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh via its logistic form: 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
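The closed form 2 / (1 + e^(-2x)) - 1 is algebraically identical to tanh, which a NumPy comparison confirms (using the `tangent_hyperbolic` name chosen above):

x = np.array([-1.0, 0.0, 2.0])
assert np.allclose(tangent_hyperbolic(x), np.tanh(x))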
706
def nor_gate(input_1: int, input_2: int) -> int:
    # NOR outputs 1 only when both inputs are 0.
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
83
0
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # Brute force over all pairs; used for the small base case.
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # In the strip, each point only needs to be compared to its 6 neighbours.
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points)))
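A brute-force cross-check on the sample points (a sketch reusing the helpers defined above):

from itertools import combinations


def brute_force(pts):
    # O(n^2) reference: minimum pairwise distance.
    return min(euclidean_distance_sqr(a, b) for a, b in combinations(pts, 2)) ** 0.5


pts = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
assert abs(closest_pair_of_points(pts, len(pts)) - brute_force(pts)) < 1e-9  # both ~ 1.4142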
707
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str: _A = """bilinear""" _A = max_size _A = short_edge_length def __call__( self , lowerCAmelCase_ ) -> Optional[Any]: _A = [] for img in imgs: _A , _A = img.shape[:2] # later: provide list and randomly choose index for resize _A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img _A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ ) if h < w: _A , _A = size, scale * w else: _A , _A = scale * h, size if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size: _A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ ) _A = newh * scale _A = neww * scale _A = int(neww + 0.5 ) _A = int(newh + 0.5 ) if img.dtype == np.uinta: _A = Image.fromarray(lowerCAmelCase_ ) _A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) _A = np.asarray(lowerCAmelCase_ ) else: _A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw _A = nn.functional.interpolate( lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 ) img_augs.append(lowerCAmelCase_ ) return img_augs class a : """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> List[Any]: _A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) _A = cfg.INPUT.FORMAT _A = cfg.SIZE_DIVISIBILITY _A = cfg.PAD_VALUE _A = cfg.INPUT.MAX_SIZE_TEST _A = cfg.MODEL.DEVICE _A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) ) _A = [im.shape[-2:] for im in images] _A = [ nn.functional.pad( lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ] return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int: with torch.no_grad(): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = [images] if single_image: assert len(lowerCAmelCase_ ) == 1 for i in range(len(lowerCAmelCase_ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge _A = torch.tensor([im.shape[:2] for im in images] ) _A = self.aug(lowerCAmelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic _A = [self.normalizer(lowerCAmelCase_ ) for x in images] # now pad them to do the following operations _A , _A = self.pad(lowerCAmelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad _A = 
torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]: assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!" _A , _A = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__) tensor[:, 1].clamp_(min=0 , max=snake_case__) tensor[:, 2].clamp_(min=0 , max=snake_case__) tensor[:, 3].clamp_(min=0 , max=snake_case__)
83
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
from collections import defaultdict


def dfs(start: int) -> int:
    """DFS traversal; returns the size of the subtree rooted at `start`."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # A subtree of even size can be cut off at its root edge.
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
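For the sample 10-node tree, the script prints 2: the DFS records vertices 3, 6, and 1 as roots of even-sized subtrees, and cutting the edges (1, 3) and (1, 6) splits the tree into components of sizes 2, 4, and 4, all even.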
83
0
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
709
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation: repeatedly pick the vertex covering the most uncovered edges."""
    queue: list[list] = []

    # For each node and its adjacency list, add them and the rank of the node to the queue.
    # Using the heapq module the queue is filled like a priority queue.
    # heapq implements a min priority queue, so -1 * len(v) is used to build it.
    for key, value in graph.items():  # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax.
        for elem in queue:
            # If the vertex has no adjacent nodes, skip it.
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank.
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue.
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
83
0
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order so the list ends up sorted ascending from head.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
710
import math
import unittest


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
83
0
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for ``direction``."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into a monotonic one."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low + length]; direction 1 is ascending, 0 is descending."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
711
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
83
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = '▁' _SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} _SCREAMING_SNAKE_CASE = { 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}, 'tokenizer_file': { 'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json' }, } _SCREAMING_SNAKE_CASE = { 'google/pegasus-xsum': 512, } class a ( UpperCamelCase_ ): """simple docstring""" lowerCamelCase :int = VOCAB_FILES_NAMES lowerCamelCase :str = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase :Any = PegasusTokenizer lowerCamelCase :Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="<pad>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<mask_2>" , lowerCAmelCase_="<mask_1>" , lowerCAmelCase_=None , lowerCAmelCase_=1_03 , **lowerCAmelCase_ , ) -> Tuple: _A = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise TypeError( F'''additional_special_tokens should be of type {type(UpperCamelCase__ )}, but is''' F''' {type(UpperCamelCase__ )}''' ) _A = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F'''<unk_{i}>''' for i in range(len(UpperCamelCase__ ) , self.offset - 1 ) ] if len(set(UpperCamelCase__ ) ) != len(UpperCamelCase__ ): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" F''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) _A = additional_special_tokens_extended else: _A = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , pad_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , mask_token_sent=UpperCamelCase__ , offset=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , ) _A = vocab_file _A = False if not self.vocab_file else True def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: _A = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( """There should be 3 special tokens: mask_token, pad_token, and eos_token +""" F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> Any: if already_has_special_tokens: return self._special_token_mask(UpperCamelCase__ ) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase__ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Dict: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Dict: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) return (out_vocab_file,)
712
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Accepts pre-built inputs such as {"image": ..., "question": ...},
            # lists of such dicts, generators, and datasets.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
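# Illustrative usage sketch (not part of the original file). The checkpoint name
# "dandelin/vilt-b32-finetuned-vqa" and the image path are placeholder assumptions,
# not values taken from this module:
#
#     from transformers import pipeline
#
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     print(vqa(image="cats.png", question="How many cats are there?"))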
83
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "crop_pct"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
713
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]: _A = {} if train_file is not None: _A = [train_file] if eval_file is not None: _A = [eval_file] if test_file is not None: _A = [test_file] _A = datasets.load_dataset("""csv""" , data_files=snake_case__) _A = list(ds[list(files.keys())[0]].features.keys()) _A = features_name.pop(snake_case__) _A = list(set(ds[list(files.keys())[0]][label_name])) _A = {label: i for i, label in enumerate(snake_case__)} _A = tokenizer.model_input_names _A = {} if len(snake_case__) == 1: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , ) elif len(snake_case__) == 2: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST]))) return train_ds, val_ds, test_ds, labelaid _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class a : """simple docstring""" lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} ) lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training 
file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} ) lowerCamelCase :int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCamelCase :bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def snake_case ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) _A , _A , _A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' """ --overwrite_output_dir to overcome.""") # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info( F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, ''' F'''16-bits training: {training_args.fpaa}''') logger.info(F'''Training/evaluation parameters {training_args}''') # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A , _A , _A , _A = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _A = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , ) def compute_metrics(snake_case__ :EvalPrediction) -> Dict: _A = np.argmax(p.predictions , axis=1) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _A = TFTrainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir) # Evaluation _A = {} if training_args.do_eval: logger.info("""*** Evaluate ***""") _A = trainer.evaluate() _A = os.path.join(training_args.output_dir , """eval_results.txt""") with open(snake_case__ , """w""") as writer: logger.info("""***** Eval results *****""") for key, value in result.items(): logger.info(F''' {key} = {value}''') writer.write(F'''{key} = {value}\n''') results.update(snake_case__) return results if __name__ == "__main__": main()
83
0
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the tanh activation function: (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
714
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
83
0
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model'} _SCREAMING_SNAKE_CASE = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 _SCREAMING_SNAKE_CASE = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } _SCREAMING_SNAKE_CASE = '▁' class a ( lowercase__ ): """simple docstring""" lowerCamelCase :List[Any] = VOCAB_FILES_NAMES lowerCamelCase :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase :Dict = ["""input_ids""", """attention_mask"""] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_=1_00 , lowerCAmelCase_=None , lowerCAmelCase_ = None , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> str: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: _A = [F'''<extra_id_{i}>''' for i in range(__lowercase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _A = len(set(filter(lambda lowerCAmelCase_ : bool("""extra_id""" in str(__lowercase ) ) , __lowercase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) _A = legacy _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , extra_ids=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy=__lowercase , **__lowercase , ) _A = vocab_file _A = extra_ids _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowercase ) @staticmethod def UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __lowercase , ) return max_model_length @property def UpperCAmelCase ( self ) -> Tuple: return self.sp_model.get_piece_size() + self._extra_ids def UpperCAmelCase ( self ) -> str: _A = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[str]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__lowercase )) + [1] return ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1] def UpperCAmelCase ( self ) -> List[Any]: return list( set(filter(lambda lowerCAmelCase_ : bool(re.search(r"""<extra_id_\d+>""" , __lowercase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCAmelCase ( self ) -> List[str]: return [self._convert_token_to_id(__lowercase ) for token in self.get_sentinel_tokens()] def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any: if len(__lowercase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[str]: _A = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[Any]: _A = self._add_eos_if_not_present(__lowercase ) if token_ids_a is None: return token_ids_a else: _A = self._add_eos_if_not_present(__lowercase ) return token_ids_a + token_ids_a def __getstate__( self ) -> Tuple: _A = self.__dict__.copy() _A = None return state def __setstate__( self , lowerCAmelCase_ ) -> List[str]: _A = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]: # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: _A = SPIECE_UNDERLINE + text.replace(__lowercase , """ """ ) return super().tokenize(__lowercase , **__lowercase ) def UpperCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Tuple: if not self.legacy: _A = text.startswith(__lowercase ) if is_first: _A = text[1:] _A = self.sp_model.encode(__lowercase , out_type=__lowercase ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(__lowercase ): _A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: if token.startswith("""<extra_id_""" ): _A = re.match(r"""<extra_id_(\d+)>""" , __lowercase ) _A = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(__lowercase ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: if index < self.sp_model.get_piece_size(): _A = self.sp_model.IdToPiece(__lowercase ) else: _A = F'''<extra_id_{self.vocab_size - 1 - index}>''' return token def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Dict: _A = [] _A = """""" _A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowercase ) + token _A = True _A = [] else: current_sub_tokens.append(__lowercase ) _A = False out_string += self.sp_model.decode(__lowercase ) return out_string.strip() def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> int: if not os.path.isdir(__lowercase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowercase ) elif not os.path.isfile(self.vocab_file ): with open(__lowercase , """wb""" ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(__lowercase ) return (out_vocab_file,)
715
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under the curve between x_start and x_end with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
83
0
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue with a fixed capacity, backed by a doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # Close the ring.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
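# Illustrative usage sketch (not part of the original file), exercising the
# fixed-capacity circular queue defined above:
#
#     queue = CircularQueueLinkedList(initial_capacity=3)
#     queue.enqueue("a")
#     queue.enqueue("b")
#     print(queue.first())    # -> "a"
#     print(queue.dequeue())  # -> "a"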
716
from __future__ import annotations

import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the shared key."""
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum circuit to simulate BB84.
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to the rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to the rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # The result of the measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extract the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
83
0
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" lowerCamelCase :Optional[int] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = 5_02_57 , lowerCAmelCase_ = 10_24 , lowerCAmelCase_ = 7_68 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = None , lowerCAmelCase_ = "gelu_new" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 1E-5 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = False , ) -> Tuple: super().__init__() _A = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) _A = prefix_inner_dim _A = prefix_hidden_dim _A = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) _A = ( nn.Linear(self.prefix_hidden_dim , lowerCAmelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) _A = GPTaConfig( vocab_size=lowerCAmelCase_ , n_positions=lowerCAmelCase_ , n_embd=lowerCAmelCase_ , n_layer=lowerCAmelCase_ , n_head=lowerCAmelCase_ , n_inner=lowerCAmelCase_ , activation_function=lowerCAmelCase_ , resid_pdrop=lowerCAmelCase_ , embd_pdrop=lowerCAmelCase_ , attn_pdrop=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , initializer_range=lowerCAmelCase_ , scale_attn_weights=lowerCAmelCase_ , use_cache=lowerCAmelCase_ , scale_attn_by_inverse_layer_idx=lowerCAmelCase_ , reorder_and_upcast_attn=lowerCAmelCase_ , ) _A = GPTaLMHeadModel(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> str: _A = self.transformer.transformer.wte(lowerCAmelCase_ ) _A = self.encode_prefix(lowerCAmelCase_ ) _A = self.decode_prefix(lowerCAmelCase_ ) _A = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: _A = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) _A = torch.cat((dummy_token, input_ids) , dim=1 ) _A = self.transformer(inputs_embeds=lowerCAmelCase_ , labels=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> torch.Tensor: return torch.zeros(lowerCAmelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: return self.encode_prefix(lowerCAmelCase_ ) @torch.no_grad() def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: _A = torch.split(lowerCAmelCase_ , 1 , dim=0 ) _A = [] _A = [] for feature in features: _A = self.decode_prefix(feature.to(lowerCAmelCase_ ) ) # back to the clip feature # Only support beam search for now _A , _A = self.generate_beam( input_embeds=lowerCAmelCase_ , device=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) generated_tokens.append(output_tokens[0] ) 
generated_seq_lengths.append(seq_lengths[0] ) _A = torch.stack(lowerCAmelCase_ ) _A = torch.stack(lowerCAmelCase_ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_ = 5 , lowerCAmelCase_ = 67 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = None , ) -> str: _A = eos_token_id _A = None _A = None _A = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=torch.int ) _A = torch.zeros(lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=torch.bool ) if input_embeds is not None: _A = input_embeds else: _A = self.transformer.transformer.wte(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ): _A = self.transformer(inputs_embeds=lowerCAmelCase_ ) _A = outputs.logits _A = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) _A = logits.softmax(-1 ).log() if scores is None: _A , _A = logits.topk(lowerCAmelCase_ , -1 ) _A = generated.expand(lowerCAmelCase_ , *generated.shape[1:] ) _A , _A = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: _A = next_tokens else: _A = tokens.expand(lowerCAmelCase_ , *tokens.shape[1:] ) _A = torch.cat((tokens, next_tokens) , dim=1 ) else: _A = -float(np.inf ) _A = 0 _A = scores[:, None] + logits seq_lengths[~is_stopped] += 1 _A = scores_sum / seq_lengths[:, None] _A , _A = scores_sum_average.view(-1 ).topk(lowerCAmelCase_ , -1 ) _A = next_tokens // scores_sum.shape[1] _A = seq_lengths[next_tokens_source] _A = next_tokens % scores_sum.shape[1] _A = next_tokens.unsqueeze(1 ) _A = tokens[next_tokens_source] _A = torch.cat((tokens, next_tokens) , dim=1 ) _A = generated[next_tokens_source] _A = scores_sum_average * seq_lengths _A = is_stopped[next_tokens_source] _A = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) _A = torch.cat((generated, next_token_embed) , dim=1 ) _A = is_stopped + next_tokens.eq(lowerCAmelCase_ ).squeeze() if is_stopped.all(): break _A = scores / seq_lengths _A = scores.argsort(descending=lowerCAmelCase_ ) # tokens tensors are already padded to max_seq_length _A = [tokens[i] for i in order] _A = torch.stack(lowerCAmelCase_ , dim=0 ) _A = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
717
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def snake_case ( snake_case__ :int) -> Optional[int]: return EnvironmentCommand() def snake_case ( snake_case__ :Tuple) -> List[str]: return EnvironmentCommand(args.accelerate_config_file) class a ( __lowerCAmelCase ): """simple docstring""" @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple: _A = parser.add_parser("""env""" ) download_parser.set_defaults(func=lowerCAmelCase_ ) download_parser.add_argument( """--accelerate-config_file""" , default=lowerCAmelCase_ , help="""The accelerate config file to use for the default values in the launching script.""" , ) download_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self , lowerCAmelCase_ , *lowerCAmelCase_ ) -> None: _A = accelerate_config_file def UpperCAmelCase ( self ) -> Dict: _A = """not installed""" if is_safetensors_available(): import safetensors _A = safetensors.__version__ elif importlib.util.find_spec("""safetensors""" ) is not None: import safetensors _A = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' _A = """not installed""" _A = _A = """not found""" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file _A = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ): _A = load_config_from_file(self._accelerate_config_file ).to_dict() _A = ( """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else F'''\t{accelerate_config}''' ) _A = """not installed""" _A = """NA""" if is_torch_available(): import torch _A = torch.__version__ _A = torch.cuda.is_available() _A = """not installed""" _A = """NA""" if is_tf_available(): import tensorflow as tf _A = tf.__version__ try: # deprecated in v2.1 _A = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool _A = bool(tf.config.list_physical_devices("""GPU""" ) ) _A = """not installed""" _A = """not installed""" _A = """not installed""" _A = """NA""" if is_flax_available(): import flax import jax import jaxlib _A = flax.__version__ _A = jax.__version__ _A = jaxlib.__version__ _A = jax.lib.xla_bridge.get_backend().platform _A = { """`transformers` version""": version, """Platform""": platform.platform(), """Python version""": platform.python_version(), """Huggingface_hub version""": huggingface_hub.__version__, """Safetensors version""": F'''{safetensors_version}''', """Accelerate version""": F'''{accelerate_version}''', """Accelerate config""": F'''{accelerate_config_str}''', """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''', """Tensorflow version (GPU?)""": F'''{tf_version} ({tf_cuda_available})''', """Flax version (CPU?/GPU?/TPU?)""": F'''{flax_version} ({jax_backend})''', """Jax version""": F'''{jax_version}''', """JaxLib version""": F'''{jaxlib_version}''', """Using GPU in script?""": """<fill in>""", """Using distributed or parallel set-up in script?""": """<fill in>""", } print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" ) print(self.format_dict(lowerCAmelCase_ ) ) 
return info @staticmethod def UpperCAmelCase ( lowerCAmelCase_ ) -> Tuple: return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
83
0
from __future__ import annotations from typing import Generic, TypeVar _SCREAMING_SNAKE_CASE = TypeVar('T') class a ( Generic[T] ): """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> None: _A = data _A = self _A = 0 class a ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: _A = {} def UpperCAmelCase ( self , lowerCAmelCase_ ) -> None: _A = DisjointSetTreeNode(lowerCamelCase__ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> DisjointSetTreeNode[T]: _A = self.map[data] if elem_ref != elem_ref.parent: _A = self.find_set(elem_ref.parent.data ) return elem_ref.parent def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: if nodea.rank > nodea.rank: _A = nodea else: _A = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: self.link(self.find_set(lowerCamelCase__ ) , self.find_set(lowerCamelCase__ ) ) class a ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: _A = {} def UpperCAmelCase ( self , lowerCAmelCase_ ) -> None: if node not in self.connections: _A = {} def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: self.add_node(lowerCamelCase__ ) self.add_node(lowerCamelCase__ ) _A = weight _A = weight def UpperCAmelCase ( self ) -> GraphUndirectedWeighted[T]: _A = [] _A = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda lowerCAmelCase_ : x[2] ) # creating the disjoint set _A = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(lowerCamelCase__ ) # MST generation _A = 0 _A = 0 _A = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: _A = edges[index] index += 1 _A = disjoint_set.find_set(lowerCamelCase__ ) _A = disjoint_set.find_set(lowerCamelCase__ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) disjoint_set.union(lowerCamelCase__ , lowerCamelCase__ ) return graph
718
import colorsys from PIL import Image # type: ignore def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float: _A = x _A = y for step in range(snake_case__): # noqa: B007 _A = a * a - b * b + x _A = 2 * a * b + y _A = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def snake_case ( snake_case__ :float) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def snake_case ( snake_case__ :float) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1)) def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image: _A = Image.new("""RGB""" , (image_width, image_height)) _A = img.load() # loop through the image-coordinates for image_x in range(snake_case__): for image_y in range(snake_case__): # determine the figure-coordinates based on the image-coordinates _A = figure_width / image_width * image_height _A = figure_center_x + (image_x / image_width - 0.5) * figure_width _A = figure_center_y + (image_y / image_height - 0.5) * figure_height _A = get_distance(snake_case__ , snake_case__ , snake_case__) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: _A = get_color_coded_rgb(snake_case__) else: _A = get_black_and_white_rgb(snake_case__) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _SCREAMING_SNAKE_CASE = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
83
0
from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case ( ) -> Union[str, Any]: _A , _A = 9, 14 # noqa: F841 _A = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _A = defaultdict(lowercase__) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost]) adjancency[nodea].append([nodea, cost]) _A = mst(lowercase__) _A = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _A = tuple(answer[:2]) _A = tuple(edge[::-1]) assert edge in result or reverse in result
719
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets _SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' _SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' _SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]: _A = {doc: key_lines} _A = {doc: sys_lines} _A = {} _A = 0 _A = 0 _A = 0 _A = 0 _A = 0 _A = 0 _A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__) key_singletons_num += singletons_num if NP_only or min_span: _A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__) _A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__) sys_singletons_num += singletons_num if NP_only or min_span: _A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__) if remove_nested: _A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _A = reader.get_mention_assignments(snake_case__ , snake_case__) _A = reader.get_mention_assignments(snake_case__ , snake_case__) _A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''') logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''') if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' """files, respectively""") return doc_coref_infos def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int: _A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , 
snake_case__ , snake_case__) _A = {} _A = 0 _A = 0 for name, metric in metrics: _A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa}) logger.info( name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _A = (conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''') output_scores.update({"""conll_score""": conll}) return output_scores def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]: _A = False for line in key_lines: if not line.startswith("""#"""): if len(line.split()) > 6: _A = line.split()[5] if not parse_col == "-": _A = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): """simple docstring""" def UpperCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]: _A = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _A = util.check_gold_parse_annotation(lowerCAmelCase_ ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _A = evaluate( key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , ) return score
83
0
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class a : """simple docstring""" @property def UpperCAmelCase ( self ) -> List[Any]: return self.get_dummy_input() @property def UpperCAmelCase ( self ) -> Union[str, Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def UpperCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Union[str, Any]: _A = 4 _A = 32 _A = (32, 32) _A = torch.manual_seed(0 ) _A = torch.device(__a ) _A = (batch_size, num_channels) + sizes _A = randn_tensor(__a , generator=__a , device=__a ) _A = {"""hidden_states""": hidden_states} if include_temb: _A = 1_28 _A = randn_tensor((batch_size, temb_channels) , generator=__a , device=__a ) if include_res_hidden_states_tuple: _A = torch.manual_seed(1 ) _A = (randn_tensor(__a , generator=__a , device=__a ),) if include_encoder_hidden_states: _A = floats_tensor((batch_size, 32, 32) ).to(__a ) if include_skip_sample: _A = randn_tensor(((batch_size, 3) + sizes) , generator=__a , device=__a ) return dummy_input def UpperCAmelCase ( self ) -> Dict: _A = { """in_channels""": 32, """out_channels""": 32, """temb_channels""": 1_28, } if self.block_type == "up": _A = 32 if self.block_type == "mid": init_dict.pop("""out_channels""" ) _A = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any: _A = self.prepare_init_args_and_inputs_for_common() _A = self.block_class(**__a ) unet_block.to(__a ) unet_block.eval() with torch.no_grad(): _A = unet_block(**__a ) if isinstance(__a , __a ): _A = output[0] self.assertEqual(output.shape , self.output_shape ) _A = output[0, -1, -3:, -3:] _A = torch.tensor(__a ).to(__a ) assert torch_all_close(output_slice.flatten() , __a , atol=5E-3 ) @unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" ) def UpperCAmelCase ( self ) -> Tuple: _A = self.prepare_init_args_and_inputs_for_common() _A = self.block_class(**__a ) model.to(__a ) model.train() _A = model(**__a ) if isinstance(__a , __a ): _A = output[0] _A = torch.device(__a ) _A = randn_tensor(output.shape , device=__a ) _A = torch.nn.functional.mse_loss(__a , __a ) loss.backward()
720
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } _SCREAMING_SNAKE_CASE = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } _SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512} def snake_case ( snake_case__ :Tuple) -> str: _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char)) _A = char _A = set(snake_case__) return pairs class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :List[Any] = VOCAB_FILES_NAMES lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase :int = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int: super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ ) with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle: _A = json.load(lowerCAmelCase_ ) _A = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle: _A = merges_handle.read().split("""\n""" )[1:-1] _A = [tuple(merge.split() ) for merge in merges] _A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _A = {} @property def UpperCAmelCase ( self ) -> int: return len(self.encoder ) def UpperCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: if token in self.cache: return self.cache[token] _A = re.sub("""([.,!?()])""" , r""" \1""" , lowerCAmelCase_ ) _A = re.sub("""(')""" , r""" \1 """ , lowerCAmelCase_ ) _A = re.sub(r"""\s{2,}""" , """ """ , lowerCAmelCase_ ) if "\n" in token: _A = token.replace("""\n""" , """ __newln__""" ) _A = token.split(""" """ ) _A = [] for token in tokens: if not len(lowerCAmelCase_ ): continue _A = token.lower() _A = tuple(lowerCAmelCase_ ) _A = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) _A = get_pairs(lowerCAmelCase_ ) if not pairs: words.append(lowerCAmelCase_ ) continue while True: _A = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(lowerCAmelCase_ ): try: _A = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) new_word.extend(word[i:j] ) _A = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(lowerCAmelCase_ ) _A = new_word if len(lowerCAmelCase_ ) == 1: break else: _A = get_pairs(lowerCAmelCase_ ) _A = """@@ """.join(lowerCAmelCase_ ) _A = 
word[:-4] _A = word words.append(lowerCAmelCase_ ) return " ".join(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _A = [] _A = re.findall(r"""\S+\n?""" , lowerCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: _A = token.lower() return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: _A = """ """.join(lowerCAmelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _A = os.path.join( lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + """\n""" ) _A = 0 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) _A = token_index writer.write(""" """.join(lowerCAmelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file
83
0
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _SCREAMING_SNAKE_CASE = object() # For specifying empty leaf dict `{}` _SCREAMING_SNAKE_CASE = object() def snake_case ( snake_case__ , snake_case__) -> Tuple: _A = tuple((re.compile(x + """$""") for x in qs)) for i in range(len(_UpperCamelCase) - len(_UpperCamelCase) + 1): _A = [x.match(_UpperCamelCase) for x, y in zip(_UpperCamelCase , ks[i:])] if matches and all(_UpperCamelCase): return True return False def snake_case ( snake_case__) -> List[str]: def replace(snake_case__ , snake_case__): for rule, replacement in rules: if _match(_UpperCamelCase , _UpperCamelCase): return replacement return val return replace def snake_case ( ) -> Tuple: return [ # embeddings (("transformer", "wpe", "embedding"), P("""mp""" , _UpperCamelCase)), (("transformer", "wte", "embedding"), P("""mp""" , _UpperCamelCase)), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_UpperCamelCase , """mp""")), (("attention", "out_proj", "kernel"), P("""mp""" , _UpperCamelCase)), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(_UpperCamelCase , """mp""")), (("mlp", "c_fc", "bias"), P("""mp""")), (("mlp", "c_proj", "kernel"), P("""mp""" , _UpperCamelCase)), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def snake_case ( snake_case__) -> Any: _A = _get_partition_rules() _A = _replacement_rules(_UpperCamelCase) _A = {k: _unmatched for k in flatten_dict(_UpperCamelCase)} _A = {k: replace(_UpperCamelCase , _UpperCamelCase) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(_UpperCamelCase))
721
_SCREAMING_SNAKE_CASE = { 'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/' } # Exclamation mark is not in ITU-R recommendation # fmt: on _SCREAMING_SNAKE_CASE = {value: key for key, value in MORSE_CODE_DICT.items()} def snake_case ( snake_case__ :str) -> str: return " ".join(MORSE_CODE_DICT[char] for char in message.upper()) def snake_case ( snake_case__ :str) -> str: return "".join(REVERSE_DICT[char] for char in message.split()) def snake_case ( ) -> None: _A = """Morse code here!""" print(snake_case__) _A = encrypt(snake_case__) print(snake_case__) _A = decrypt(snake_case__) print(snake_case__) if __name__ == "__main__": main()
83
0
'''simple docstring''' import doctest from collections import deque import numpy as np class a : """simple docstring""" def __init__( self ) -> None: _A = [2, 1, 2, -1] _A = [1, 2, 3, 4] def UpperCAmelCase ( self ) -> list[float]: _A = len(self.first_signal ) _A = len(self.second_signal ) _A = max(A__ , A__ ) # create a zero matrix of max_length x max_length _A = [[0] * max_length for i in range(A__ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(A__ ): _A = deque(self.second_signal ) rotated_signal.rotate(A__ ) for j, item in enumerate(A__ ): matrix[i][j] += item # multiply the matrix with the first signal _A = np.matmul(np.transpose(A__ ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(A__ , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
700
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { 'configuration_jukebox': [ 'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'JukeboxConfig', 'JukeboxPriorConfig', 'JukeboxVQVAEConfig', ], 'tokenization_jukebox': ['JukeboxTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ 'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'JukeboxModel', 'JukeboxPreTrainedModel', 'JukeboxVQVAE', 'JukeboxPrior', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
83
0
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a ( unittest.TestCase ): """simple docstring""" @property def UpperCAmelCase ( self ) -> str: torch.manual_seed(0 ) _A = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def UpperCAmelCase ( self ) -> Dict: _A = self.dummy_uncond_unet _A = PNDMScheduler() _A = PNDMPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) pndm.to(lowerCAmelCase_ ) pndm.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = torch.manual_seed(0 ) _A = pndm(generator=lowerCAmelCase_ , num_inference_steps=20 , output_type="""numpy""" ).images _A = torch.manual_seed(0 ) _A = pndm(generator=lowerCAmelCase_ , num_inference_steps=20 , output_type="""numpy""" , return_dict=lowerCAmelCase_ )[0] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class a ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Optional[Any]: _A = """google/ddpm-cifar10-32""" _A = UNetaDModel.from_pretrained(lowerCAmelCase_ ) _A = PNDMScheduler() _A = PNDMPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) pndm.to(lowerCAmelCase_ ) pndm.set_progress_bar_config(disable=lowerCAmelCase_ ) _A = torch.manual_seed(0 ) _A = pndm(generator=lowerCAmelCase_ , output_type="""numpy""" ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
701
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Tuple = '''philschmid/bart-large-cnn-samsum''' lowerCamelCase :Tuple = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) lowerCamelCase :List[Any] = '''summarizer''' lowerCamelCase :List[str] = AutoTokenizer lowerCamelCase :Dict = AutoModelForSeqaSeqLM lowerCamelCase :int = ['''text'''] lowerCamelCase :List[Any] = ['''text'''] def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: return self.pre_processor(lowerCAmelCase_ , return_tensors="""pt""" , truncation=lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: return self.model.generate(**lowerCAmelCase_ )[0] def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return self.pre_processor.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
83
0
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[int] , snake_case__ :Any) -> Dict: _A = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def snake_case ( ) -> Optional[Any]: print(sum_of_series(1 , 1 , 10)) if __name__ == "__main__": import doctest doctest.testmod()
702
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _SCREAMING_SNAKE_CASE = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def snake_case ( snake_case__ :Union[str, Any]) -> Dict: _A = torch.load(snake_case__ , map_location="""cpu""") return sd def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]: _A = OrderedDict() _A = torch.arange(config.max_position_embeddings).expand((1, -1)) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _A = key for name_pair in rename_keys_prefix: _A = new_key.replace(name_pair[0] , name_pair[1]) _A = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _A = new_d["""cls.predictions.bias"""] return new_d @torch.no_grad() def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int: assert ( checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: _A = """pretraining""" if "vcr" in checkpoint_path: _A = {"""visual_embedding_dim""": 512} elif "vqa_advanced" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} elif "vqa" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} elif "nlvr" in checkpoint_path: _A = {"""visual_embedding_dim""": 1_024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''') else: if "vcr" in checkpoint_path: _A = {"""visual_embedding_dim""": 512} _A = """multichoice""" elif "vqa_advanced" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048} _A = """vqa_advanced""" elif "vqa" in checkpoint_path: _A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129} _A = """vqa""" elif "nlvr" in checkpoint_path: _A = { """visual_embedding_dim""": 1_024, """num_labels""": 2, } _A = """nlvr""" _A = VisualBertConfig(**snake_case__) # Load State Dict _A = load_state_dict(snake_case__) _A = get_new_dict(snake_case__ , snake_case__) if model_type == "pretraining": _A = VisualBertForPreTraining(snake_case__) elif model_type == "vqa": _A = VisualBertForQuestionAnswering(snake_case__) elif model_type == "nlvr": _A = VisualBertForVisualReasoning(snake_case__) elif model_type == "multichoice": _A = VisualBertForMultipleChoice(snake_case__) model.load_state_dict(snake_case__) # Save Checkpoints Path(snake_case__).mkdir(exist_ok=snake_case__) model.save_pretrained(snake_case__) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', 
type=str, help='Path to the output PyTorch model.') _SCREAMING_SNAKE_CASE = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
83
0
from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = {} _SCREAMING_SNAKE_CASE = {} def snake_case ( snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :List[str] = None , ) -> Optional[Any]: _A = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''') _A = formatter_cls for alias in set(aliases + [format_type]): if alias in _FORMAT_TYPES_ALIASES: logger.warning( F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''') _A = format_type def snake_case ( snake_case__ :int , snake_case__ :Any , snake_case__ :Tuple = None) -> Union[str, Any]: _A = aliases if aliases is not None else [] for alias in set(aliases + [format_type]): _A = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: _SCREAMING_SNAKE_CASE = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: _SCREAMING_SNAKE_CASE = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: _SCREAMING_SNAKE_CASE = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def snake_case ( snake_case__ :Any) -> Union[str, Any]: if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def snake_case ( snake_case__ :Optional[Any] , **snake_case__ :int) -> Tuple: _A = get_format_type_from_alias(lowerCAmelCase_) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**lowerCAmelCase_) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got \'{format_type}\'''')
703
from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class a ( __lowerCAmelCase ): """simple docstring""" def UpperCAmelCase ( self ) -> List[str]: return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def UpperCAmelCase ( self ) -> Optional[int]: _A = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]} return Dataset.from_dict(lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self._create_example_records() _A = Dataset.from_list(lowerCAmelCase_ ) self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] ) for i, r in enumerate(lowerCAmelCase_ ): self.assertDictEqual(lowerCAmelCase_ , example_records[i] ) def UpperCAmelCase ( self ) -> str: _A = self._create_example_records() _A = Dataset.from_list(lowerCAmelCase_ ) _A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def UpperCAmelCase ( self ) -> Any: # checks what happens with missing columns _A = [{"""col_1""": 1}, {"""col_2""": """x"""}] _A = Dataset.from_list(lowerCAmelCase_ ) self.assertDictEqual(dset[0] , {"""col_1""": 1} ) self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns def UpperCAmelCase ( self ) -> Tuple: # checks if the type can be inferred from the second record _A = [{"""col_1""": []}, {"""col_1""": [1, 2]}] _A = Dataset.from_list(lowerCAmelCase_ ) self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) ) def UpperCAmelCase ( self ) -> Any: _A = Dataset.from_list([] ) self.assertEqual(len(lowerCAmelCase_ ) , 0 ) self.assertListEqual(dset.column_names , [] )
83
0
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _SCREAMING_SNAKE_CASE = 6_378_137.0 _SCREAMING_SNAKE_CASE = 6_356_752.314_245 _SCREAMING_SNAKE_CASE = 6_378_137 def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :float , snake_case__ :float) -> Dict: _A = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude _A = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__))) _A = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__))) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius _A = haversine_distance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) / EQUATORIAL_RADIUS # Intermediate P and Q values _A = (b_lata + b_lata) / 2 _A = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) _A = (sin(SCREAMING_SNAKE_CASE__) ** 2) * (cos(SCREAMING_SNAKE_CASE__) ** 2) _A = cos(sigma / 2) ** 2 _A = (sigma - sin(SCREAMING_SNAKE_CASE__)) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) _A = (cos(SCREAMING_SNAKE_CASE__) ** 2) * (sin(SCREAMING_SNAKE_CASE__) ** 2) _A = sin(sigma / 2) ** 2 _A = (sigma + sin(SCREAMING_SNAKE_CASE__)) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
704
def snake_case ( snake_case__ :int = 1_000_000) -> int: _A = set(range(3 , snake_case__ , 2)) primes.add(2) for p in range(3 , snake_case__ , 2): if p not in primes: continue primes.difference_update(set(range(p * p , snake_case__ , snake_case__))) _A = [float(snake_case__) for n in range(limit + 1)] for p in primes: for n in range(snake_case__ , limit + 1 , snake_case__): phi[n] *= 1 - 1 / p return int(sum(phi[2:])) if __name__ == "__main__": print(F'''{solution() = }''')
83
0
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class a : """simple docstring""" @staticmethod def UpperCAmelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int: pass @is_pipeline_test @require_vision class a ( unittest.TestCase ): """simple docstring""" @require_torch def UpperCAmelCase ( self ) -> int: _A = pipeline( model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , ) _A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _A = image_classifier(__snake_case , candidate_labels=["""a""", """b""", """c"""] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__snake_case ) , [ [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}], [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}], ] , ) _A = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], ] , ) @require_tf def UpperCAmelCase ( self ) -> Dict: _A = pipeline( model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" ) _A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _A = image_classifier(__snake_case , candidate_labels=["""a""", """b""", """c"""] ) self.assertEqual( nested_simplify(__snake_case ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , ) _A = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], [ {"""score""": 0.333, """label""": 
ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], [ {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, {"""score""": 0.333, """label""": ANY(__snake_case )}, ], ] , ) @slow @require_torch def UpperCAmelCase ( self ) -> Optional[Any]: _A = pipeline( task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , ) # This is an image of 2 cats with remotes and no planes _A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _A = image_classifier(__snake_case , candidate_labels=["""cat""", """plane""", """remote"""] ) self.assertEqual( nested_simplify(__snake_case ) , [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ], ] * 5 , ) @slow @require_tf def UpperCAmelCase ( self ) -> int: _A = pipeline( task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" ) # This is an image of 2 cats with remotes and no planes _A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _A = image_classifier(__snake_case , candidate_labels=["""cat""", """plane""", """remote"""] ) self.assertEqual( nested_simplify(__snake_case ) , [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ] , ) _A = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 ) self.assertEqual( nested_simplify(__snake_case ) , [ [ {"""score""": 0.511, """label""": """remote"""}, {"""score""": 0.485, """label""": """cat"""}, {"""score""": 0.004, """label""": """plane"""}, ], ] * 5 , )
705
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]: _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = relative_attention _A = position_biased_input _A = pos_att_type _A = scope def UpperCAmelCase ( self ) -> Dict: _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self ) -> Optional[int]: return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _A = DebertaVaModel(config=lowerCAmelCase_ ) 
model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] _A = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _A = DebertaVaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _A = self.num_labels _A = DebertaVaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = self.num_labels _A = DebertaVaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: _A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( __lowerCAmelCase , __lowerCAmelCase 
, unittest.TestCase ): """simple docstring""" lowerCamelCase :int = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase :str = ( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase :str = True lowerCamelCase :Union[str, Any] = False lowerCamelCase :Optional[int] = False lowerCamelCase :List[str] = False lowerCamelCase :str = False def UpperCAmelCase ( self ) -> Optional[int]: _A = DebertaVaModelTester(self ) _A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> List[str]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Any: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> int: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ ) @slow def UpperCAmelCase ( self ) -> Any: for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = DebertaVaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase ( self ) -> int: pass @slow def UpperCAmelCase ( self ) -> Optional[Any]: _A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" ) _A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) _A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. _A = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
83
0
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
706
def nor_gate(input_a: int, input_b: int) -> int:
    return int(input_a == input_b == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |    {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |    {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |    {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |    {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
83
0
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _SCREAMING_SNAKE_CASE = datasets.utils.logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = ['names', 'prefix'] _SCREAMING_SNAKE_CASE = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] _SCREAMING_SNAKE_CASE = ['encoding_errors', 'on_bad_lines'] _SCREAMING_SNAKE_CASE = ['date_format'] @dataclass class a ( datasets.BuilderConfig ): """simple docstring""" lowerCamelCase :str = "," lowerCamelCase :Optional[str] = None lowerCamelCase :Optional[Union[int, List[int], str]] = "infer" lowerCamelCase :Optional[List[str]] = None lowerCamelCase :Optional[List[str]] = None lowerCamelCase :Optional[Union[int, str, List[int], List[str]]] = None lowerCamelCase :Optional[Union[List[int], List[str]]] = None lowerCamelCase :Optional[str] = None lowerCamelCase :bool = True lowerCamelCase :Optional[Literal["c", "python", "pyarrow"]] = None lowerCamelCase :Dict[Union[int, str], Callable[[Any], Any]] = None lowerCamelCase :Optional[list] = None lowerCamelCase :Optional[list] = None lowerCamelCase :bool = False lowerCamelCase :Optional[Union[int, List[int]]] = None lowerCamelCase :Optional[int] = None lowerCamelCase :Optional[Union[str, List[str]]] = None lowerCamelCase :bool = True lowerCamelCase :bool = True lowerCamelCase :bool = False lowerCamelCase :bool = True lowerCamelCase :Optional[str] = None lowerCamelCase :str = "." lowerCamelCase :Optional[str] = None lowerCamelCase :str = '"' lowerCamelCase :int = 0 lowerCamelCase :Optional[str] = None lowerCamelCase :Optional[str] = None lowerCamelCase :Optional[str] = None lowerCamelCase :Optional[str] = None lowerCamelCase :bool = True lowerCamelCase :bool = True lowerCamelCase :int = 0 lowerCamelCase :bool = True lowerCamelCase :bool = False lowerCamelCase :Optional[str] = None lowerCamelCase :int = 10000 lowerCamelCase :Optional[datasets.Features] = None lowerCamelCase :Optional[str] = "strict" lowerCamelCase :Literal["error", "warn", "skip"] = "error" lowerCamelCase :Optional[str] = None def UpperCAmelCase ( self ) -> List[str]: if self.delimiter is not None: _A = self.delimiter if self.column_names is not None: _A = self.column_names @property def UpperCAmelCase ( self ) -> Any: _A = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, 
"chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __a ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class a ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCamelCase :Optional[Any] = CsvConfig def UpperCAmelCase ( self ) -> Any: return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) _A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__a , (str, list, tuple) ): _A = data_files if isinstance(__a , __a ): _A = [files] _A = [dl_manager.iter_files(__a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] _A = [] for split_name, files in data_files.items(): if isinstance(__a , __a ): _A = [files] _A = [dl_manager.iter_files(__a ) for file in files] splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"""files""": files} ) ) return splits def UpperCAmelCase ( self , lowerCAmelCase_ ) -> pa.Table: if self.config.features is not None: _A = self.config.features.arrow_schema if all(not require_storage_cast(__a ) for feature in self.config.features.values() ): # cheaper cast _A = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__a ) else: # more expensive cast; allows str <-> int/float or str to Audio for example _A = table_cast(__a , __a ) return pa_table def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str: _A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(__a ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(__a ) ): _A = pd.read_csv(__a , iterator=__a , dtype=__a , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(__a ): _A = pa.Table.from_pandas(__a ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__a ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(__a )}: {e}''' ) raise
707
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class a : """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=sys.maxsize ) -> str: _A = """bilinear""" _A = max_size _A = short_edge_length def __call__( self , lowerCAmelCase_ ) -> Optional[Any]: _A = [] for img in imgs: _A , _A = img.shape[:2] # later: provide list and randomly choose index for resize _A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img _A = size * 1.0 / min(lowerCAmelCase_ , lowerCAmelCase_ ) if h < w: _A , _A = size, scale * w else: _A , _A = scale * h, size if max(lowerCAmelCase_ , lowerCAmelCase_ ) > self.max_size: _A = self.max_size * 1.0 / max(lowerCAmelCase_ , lowerCAmelCase_ ) _A = newh * scale _A = neww * scale _A = int(neww + 0.5 ) _A = int(newh + 0.5 ) if img.dtype == np.uinta: _A = Image.fromarray(lowerCAmelCase_ ) _A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) _A = np.asarray(lowerCAmelCase_ ) else: _A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw _A = nn.functional.interpolate( lowerCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase_ ).squeeze(0 ) img_augs.append(lowerCAmelCase_ ) return img_augs class a : """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> List[Any]: _A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) _A = cfg.INPUT.FORMAT _A = cfg.SIZE_DIVISIBILITY _A = cfg.PAD_VALUE _A = cfg.INPUT.MAX_SIZE_TEST _A = cfg.MODEL.DEVICE _A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) _A = lambda lowerCAmelCase_ : (x - self.pixel_mean) / self.pixel_std def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = tuple(max(lowerCAmelCase_ ) for s in zip(*[img.shape for img in images] ) ) _A = [im.shape[-2:] for im in images] _A = [ nn.functional.pad( lowerCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ] return torch.stack(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int: with torch.no_grad(): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = [images] if single_image: assert len(lowerCAmelCase_ ) == 1 for i in range(len(lowerCAmelCase_ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(lowerCAmelCase_ , images.pop(lowerCAmelCase_ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( lowerCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase_ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge _A = torch.tensor([im.shape[:2] for im in images] ) _A = self.aug(lowerCAmelCase_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic _A = [self.normalizer(lowerCAmelCase_ ) for x in images] # now pad them to do the following operations _A , _A = self.pad(lowerCAmelCase_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad _A = 
torch.true_divide(lowerCAmelCase_ , lowerCAmelCase_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Tuple: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def snake_case ( snake_case__ :Optional[int] , snake_case__ :Tuple[int, int]) -> Optional[Any]: assert torch.isfinite(snake_case__).all(), "Box tensor contains infinite or NaN!" _A , _A = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__) tensor[:, 1].clamp_(min=0 , max=snake_case__) tensor[:, 2].clamp_(min=0 , max=snake_case__) tensor[:, 3].clamp_(min=0 , max=snake_case__)
83
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class a ( UpperCAmelCase_ ): """simple docstring""" @slow @require_torch def UpperCAmelCase ( self ) -> str: _A = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) _A = BertTokenizer.from_pretrained("""bert-base-uncased""" ) _A = bertabert.config.encoder.vocab_size _A = tokenizer.sep_token_id _A = tokenizer.cls_token_id _A = 1_28 _A = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) _A = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) _A = train_dataset.select(range(32 ) ) _A = val_dataset.select(range(16 ) ) _A = 4 def _map_to_encoder_decoder_inputs(lowerCAmelCase_ ): # Tokenizer will automatically set [BOS] <text> [EOS] _A = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_snake_case , max_length=5_12 ) _A = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_snake_case , max_length=1_28 ) _A = inputs.input_ids _A = inputs.attention_mask _A = outputs.input_ids _A = outputs.input_ids.copy() _A = [ [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] _A = outputs.attention_mask assert all(len(_snake_case ) == 5_12 for x in inputs.input_ids ) assert all(len(_snake_case ) == 1_28 for x in outputs.input_ids ) return batch def _compute_metrics(lowerCAmelCase_ ): _A = pred.label_ids _A = pred.predictions # all unnecessary tokens are removed _A = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case ) _A = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case ) _A = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case ) return {"accuracy": accuracy} # map train dataset _A = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset _A = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) _A = self.get_auto_remove_tmp_dir() _A = SeqaSeqTrainingArguments( output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy="""steps""" , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _A = SeqaSeqTrainer( model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , ) # start training trainer.train()
708
from collections import defaultdict


def dfs(start: int) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
83
0
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\ /| |-")
    print(r"|/ \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
709
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue = []
    # For each node, push it and the rank of the node to the queue.
    # heapq implements a min-priority queue, so -1 * len(v) makes it behave
    # like a max-priority queue on node degree.
    for key, value in graph.items():  # O(log(n)) per push
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
83
0
from dataclasses import dataclass, field from typing import Optional @dataclass class a : """simple docstring""" lowerCamelCase :Tuple = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} ) lowerCamelCase :str = field( default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} ) lowerCamelCase :List[Any] = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} ) lowerCamelCase :Any = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) lowerCamelCase :Tuple = field(default=2 , metadata={'''help''': '''Batch size for training.'''} ) lowerCamelCase :int = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} ) lowerCamelCase :Any = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} ) lowerCamelCase :str = field( default=10000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} ) lowerCamelCase :int = field(default=2E-4 , metadata={'''help''': '''Learning rate fo training.'''} ) lowerCamelCase :Union[str, Any] = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} ) lowerCamelCase :Union[str, Any] = field( default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} ) lowerCamelCase :str = field( default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} ) lowerCamelCase :List[Any] = field( default=UpperCamelCase__ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} ) lowerCamelCase :Any = field(default=50000 , metadata={'''help''': '''Maximum number of training steps.'''} ) lowerCamelCase :Optional[int] = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} ) lowerCamelCase :Dict = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} ) lowerCamelCase :int = field(default=1 , metadata={'''help''': '''Training seed.'''} ) lowerCamelCase :Union[str, Any] = field( default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , ) lowerCamelCase :Optional[int] = field( default=UpperCamelCase__ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} ) lowerCamelCase :int = field(default=UpperCamelCase__ , metadata={'''help''': '''If True the data is pretokenized.'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :int = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) lowerCamelCase :int = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) lowerCamelCase :List[Any] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} ) lowerCamelCase :List[Any] = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. 
If -1 the full dataset is evaluated.'''} ) lowerCamelCase :List[str] = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} ) lowerCamelCase :Any = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :Dict = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) lowerCamelCase :List[Any] = field(default=UpperCamelCase__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) lowerCamelCase :Dict = field( default=UpperCamelCase__ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , ) lowerCamelCase :Tuple = field( default=UpperCamelCase__ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} ) lowerCamelCase :str = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} ) lowerCamelCase :Union[str, Any] = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} ) lowerCamelCase :Union[str, Any] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} ) lowerCamelCase :Tuple = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} ) lowerCamelCase :Union[str, Any] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} ) lowerCamelCase :List[Any] = field( default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} ) lowerCamelCase :Any = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) lowerCamelCase :List[str] = field( default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} ) lowerCamelCase :Dict = field( default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} ) lowerCamelCase :str = field( default=-1 , metadata={ '''help''': ( '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive''' ''' number corresponds to which GPU device id to run on.''' ) } , ) @dataclass class a : """simple docstring""" lowerCamelCase :Dict = field( default=UpperCamelCase__ , metadata={ '''help''': '''The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available.''' } , ) lowerCamelCase :Any = field( default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} ) lowerCamelCase :str = field( default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} ) lowerCamelCase :Tuple = field( default=100000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} ) lowerCamelCase :List[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) lowerCamelCase :Dict = field( default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} ) lowerCamelCase :List[Any] = field( default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} ) lowerCamelCase :Dict = field( default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} ) lowerCamelCase :Optional[Any] = field( default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} ) lowerCamelCase :Optional[Any] = field( default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} ) lowerCamelCase :str = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , ) lowerCamelCase :Tuple = field( default=UpperCamelCase__ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} ) lowerCamelCase :Dict = field( default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :Tuple = field( default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} ) lowerCamelCase :int = field( default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} ) lowerCamelCase :List[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) lowerCamelCase :str = field(default=200000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} ) lowerCamelCase :Tuple = field( default=32768 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} ) lowerCamelCase :int = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} ) lowerCamelCase :Any = field(default=UpperCamelCase__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :str = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} ) lowerCamelCase :Optional[Any] = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} ) lowerCamelCase :List[str] = field( default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} ) lowerCamelCase :List[Any] = field(default=UpperCamelCase__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :Dict = field( default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} ) lowerCamelCase :List[Any] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} ) lowerCamelCase 
:Union[str, Any] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} ) lowerCamelCase :Optional[int] = field(default=UpperCamelCase__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
710
import math
import unittest


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class PrimeCheckTest(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
83
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class a ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=18 , lowerCAmelCase_=30 , lowerCAmelCase_=4_00 , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=True , ) -> Union[str, Any]: _A = parent _A = batch_size _A = num_channels _A = image_size _A = min_resolution _A = max_resolution _A = do_resize _A = size_divisor _A = do_rescale def UpperCAmelCase ( self ) -> Union[str, Any]: return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" lowerCamelCase :Dict = GLPNImageProcessor if is_vision_available() else None def UpperCAmelCase ( self ) -> List[str]: _A = GLPNImageProcessingTester(self ) @property def UpperCAmelCase ( self ) -> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self ) -> Optional[Any]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """size_divisor""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """resample""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """do_rescale""" ) ) def UpperCAmelCase ( self ) -> List[Any]: pass def UpperCAmelCase ( self ) -> Union[str, Any]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) _A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def UpperCAmelCase ( self ) -> List[Any]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) _A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def UpperCAmelCase ( self ) -> Union[str, Any]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched 
input (GLPNImageProcessor doesn't support batching) _A = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
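A minimal usage sketch for the processor under test; the random image is synthetic and the `size_divisor` value is illustrative:

import numpy as np
from PIL import Image
from transformers import GLPNImageProcessor

# Any RGB image works; height/width of the output are rounded down to
# multiples of size_divisor, as the tests above assert.
image = Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
processor = GLPNImageProcessor(size_divisor=32)
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)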
711
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
83
0
import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class a ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCamelCase :Union[str, Any] = RoFormerTokenizer lowerCamelCase :List[Any] = RoFormerTokenizerFast lowerCamelCase :Tuple = True lowerCamelCase :Any = True def UpperCAmelCase ( self ) -> str: super().setUp() def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Tuple: return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowerCamelCase ) def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Tuple: return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowerCamelCase ) def UpperCAmelCase ( self ) -> str: _A = """永和服装饰品有限公司,今天天气非常好""" _A = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好""" return input_text, output_text def UpperCAmelCase ( self ) -> Union[str, Any]: _A = self.get_tokenizer() _A , _A = self.get_chinese_input_output_texts() _A = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , output_text.split() ) _A = tokens + [tokenizer.unk_token] _A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) def UpperCAmelCase ( self ) -> Optional[int]: _A = self.get_rust_tokenizer() _A , _A = self.get_chinese_input_output_texts() _A = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , output_text.split() ) _A = tokens + [tokenizer.unk_token] _A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) def UpperCAmelCase ( self ) -> Union[str, Any]: pass def UpperCAmelCase ( self ) -> Union[str, Any]: pass def UpperCAmelCase ( self ) -> Optional[Any]: pass
712
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class a ( __lowerCAmelCase ): """simple docstring""" def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type(lowerCAmelCase_ ) def UpperCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple: _A , _A = {}, {} if padding is not None: _A = padding if truncation is not None: _A = truncation if top_k is not None: _A = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> Union[str, Any]: if isinstance(lowerCAmelCase_ , (Image.Image, str) ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _A = {"""image""": image, """question""": question} else: _A = image _A = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) return results def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Any: _A = load_image(inputs["""image"""] ) _A = self.tokenizer( inputs["""question"""] , return_tensors=self.framework , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ ) _A = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCAmelCase_ ) return model_inputs def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _A = self.model(**lowerCAmelCase_ ) return model_outputs def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> Union[str, Any]: if top_k > self.model.config.num_labels: _A = self.model.config.num_labels if self.framework == "pt": _A = model_outputs.logits.sigmoid()[0] _A , _A = probs.topk(lowerCAmelCase_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _A = scores.tolist() _A = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
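A minimal usage sketch of this pipeline through the public `transformers` factory; the checkpoint name and image path are illustrative, not part of the file above:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# Accepts either vqa(image=..., question=...) or a dict/list of dicts,
# matching the __call__ branches above.
preds = vqa(image="./photo.jpg", question="What color is the car?", top_k=3)
for p in preds:
    print(f"{p['answer']}: {p['score']:.3f}")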
83
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
713
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str , snake_case__ :PreTrainedTokenizer , snake_case__ :int , snake_case__ :Optional[int] = None , ) -> Optional[int]: _A = {} if train_file is not None: _A = [train_file] if eval_file is not None: _A = [eval_file] if test_file is not None: _A = [test_file] _A = datasets.load_dataset("""csv""" , data_files=snake_case__) _A = list(ds[list(files.keys())[0]].features.keys()) _A = features_name.pop(snake_case__) _A = list(set(ds[list(files.keys())[0]][label_name])) _A = {label: i for i, label in enumerate(snake_case__)} _A = tokenizer.model_input_names _A = {} if len(snake_case__) == 1: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""") , batched=snake_case__ , ) elif len(snake_case__) == 2: for k in files.keys(): _A = ds[k].map( lambda snake_case__: tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding="""max_length""" , ) , batched=snake_case__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION]))) _A = ( tf.data.Dataset.from_generator( snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST]))) return train_ds, val_ds, test_ds, labelaid _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class a : """simple docstring""" lowerCamelCase :int = field(metadata={'''help''': '''Which column contains the label'''} ) lowerCamelCase :str = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the training 
file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the development file'''} ) lowerCamelCase :Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''The path of the test file'''} ) lowerCamelCase :int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCamelCase :bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class a : """simple docstring""" lowerCamelCase :str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCamelCase :bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowerCamelCase :Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def snake_case ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) _A , _A , _A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' """ --overwrite_output_dir to overcome.""") # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info( F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, ''' F'''16-bits training: {training_args.fpaa}''') logger.info(F'''Training/evaluation parameters {training_args}''') # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A , _A , _A , _A = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _A = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , ) def compute_metrics(snake_case__ :EvalPrediction) -> Dict: _A = np.argmax(p.predictions , axis=1) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _A = TFTrainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir) # Evaluation _A = {} if training_args.do_eval: logger.info("""*** Evaluate ***""") _A = trainer.evaluate() _A = os.path.join(training_args.output_dir , """eval_results.txt""") with open(snake_case__ , """w""") as writer: logger.info("""***** Eval results *****""") for key, value in result.items(): logger.info(F''' {key} = {value}''') writer.write(F'''{key} = {value}\n''') results.update(snake_case__) return results if __name__ == "__main__": main()
83
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
714
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class a ( __lowerCAmelCase ): """simple docstring""" lowerCamelCase :Union[str, Any] = '''speech_to_text''' lowerCamelCase :List[str] = ['''past_key_values'''] lowerCamelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCAmelCase_=1_00_00 , lowerCAmelCase_=12 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=4 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=2 , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=60_00 , lowerCAmelCase_=10_24 , lowerCAmelCase_=2 , lowerCAmelCase_=(5, 5) , lowerCAmelCase_=10_24 , lowerCAmelCase_=80 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Tuple: _A = vocab_size _A = d_model _A = encoder_ffn_dim _A = encoder_layers _A = encoder_attention_heads _A = decoder_ffn_dim _A = decoder_layers _A = decoder_attention_heads _A = dropout _A = attention_dropout _A = activation_dropout _A = activation_function _A = init_std _A = encoder_layerdrop _A = decoder_layerdrop _A = use_cache _A = encoder_layers _A = scale_embedding # scale factor will be sqrt(d_model) if True _A = max_source_positions _A = max_target_positions _A = num_conv_layers _A = list(lowerCAmelCase_ ) _A = conv_channels _A = input_feat_per_channel _A = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
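A minimal instantiation sketch for the config above; the overridden sizes are illustrative only:

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration

# Build a small randomly-initialized model from a customized config.
config = Speech2TextConfig(encoder_layers=4, decoder_layers=2, d_model=256)
model = Speech2TextForConditionalGeneration(config)
print(model.config.num_conv_layers, model.config.conv_kernel_sizes)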
83
0
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
715
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate small segments of the curve as linear and solve
        # for the trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
83
0
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                f"is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                f"is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
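A usage sketch of the two entry points through the public `datasets` API (the toy datasets `ds_a` and `ds_b` are illustrative):

from datasets import Dataset, concatenate_datasets, interleave_datasets

ds_a = Dataset.from_dict({"text": ["a1", "a2", "a3"]})
ds_b = Dataset.from_dict({"text": ["b1", "b2", "b3"]})

mixed = interleave_datasets([ds_a, ds_b], probabilities=[0.5, 0.5], seed=42)
print(mixed["text"])    # order depends on the seeded sampling

stacked = concatenate_datasets([ds_a, ds_b])
print(stacked["text"])  # ['a1', 'a2', 'a3', 'b1', 'b2', 'b3']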
716
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
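The 6 * key_len oversampling above works because Alice and Bob each choose a basis uniformly at random, so about half the positions survive basis sifting (the file's own comment budgets even more conservatively at 25%). A classical, numpy-only sketch of just the sifting step, runnable without qiskit:

import numpy as np

rng = np.random.default_rng(0)
n = 6 * 8  # same oversampling as bb84(key_len=8)
alice_basis = rng.integers(2, size=n)
bob_basis = rng.integers(2, size=n)
surviving = int((alice_basis == bob_basis).sum())
print(surviving, "of", n, "positions survive sifting")  # ~n/2 on average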
83
0