Dataset schema (column name, dtype, observed min/max):

    code                     string   lengths 86 – 54.5k
    code_codestyle           int64    0 – 371
    style_context            string   lengths 87 – 49.2k
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1
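Each record below pairs a `code` sample with a `style_context` sample, the style IDs of both, and a binary `label`. As a minimal sketch of how a dataset with this schema could be inspected, assuming it is published as a Hugging Face dataset (the repository ID here is a placeholder):

    from datasets import load_dataset

    # Hypothetical repository ID; substitute the real one.
    ds = load_dataset("username/code-style-pairs", split="train")

    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:200])  # first characters of the code sample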
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase_ : Any = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[str] = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : str = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys UpperCAmelCase_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 318
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
style_context_codestyle: 318
label: 1
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = 42 class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = 1 @register_to_config def __init__( self : Any , lowercase_ : int = 2000 , lowercase_ : float = 0.15 , lowercase_ : float = 0.01 , lowercase_ : float = 13_48.0 , lowercase_ : float = 1e-5 , lowercase_ : int = 1 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = sigma_max # setable values SCREAMING_SNAKE_CASE_ : List[str] = None self.set_sigmas(lowercase_ , lowercase_ , lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : torch.FloatTensor , lowercase_ : Optional[int] = None): '''simple docstring''' return sample def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int , lowercase_ : float = None , lowercase_ : Union[str, torch.device] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = sampling_eps if sampling_eps is not None else self.config.sampling_eps SCREAMING_SNAKE_CASE_ : Dict = torch.linspace(1 , lowercase_ , lowercase_ , device=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : int , lowercase_ : float = None , lowercase_ : float = None , lowercase_ : float = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min SCREAMING_SNAKE_CASE_ : Optional[Any] = sigma_max if sigma_max is not None else self.config.sigma_max SCREAMING_SNAKE_CASE_ : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.exp(torch.linspace(math.log(lowercase_) , math.log(lowercase_) , lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any]): '''simple docstring''' return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device)) , self.discrete_sigmas[timesteps - 1].to(timesteps.device) , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : torch.FloatTensor , lowercase_ : int , lowercase_ : torch.FloatTensor , lowercase_ : Optional[torch.Generator] = None , lowercase_ : bool = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''') SCREAMING_SNAKE_CASE_ : Dict = timestep * torch.ones( sample.shape[0] , device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0]) SCREAMING_SNAKE_CASE_ : Any = (timestep * (len(self.timesteps) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda SCREAMING_SNAKE_CASE_ : List[Any] = timesteps.to(self.discrete_sigmas.device) SCREAMING_SNAKE_CASE_ : int = 
self.discrete_sigmas[timesteps].to(sample.device) SCREAMING_SNAKE_CASE_ : Dict = self.get_adjacent_sigma(lowercase_ , lowercase_).to(sample.device) SCREAMING_SNAKE_CASE_ : List[str] = torch.zeros_like(lowercase_) SCREAMING_SNAKE_CASE_ : int = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods SCREAMING_SNAKE_CASE_ : List[Any] = diffusion.flatten() while len(diffusion.shape) < len(sample.shape): SCREAMING_SNAKE_CASE_ : int = diffusion.unsqueeze(-1) SCREAMING_SNAKE_CASE_ : List[str] = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of SCREAMING_SNAKE_CASE_ : Tuple = randn_tensor( sample.shape , layout=sample.layout , generator=lowercase_ , device=sample.device , dtype=sample.dtype) SCREAMING_SNAKE_CASE_ : List[Any] = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? SCREAMING_SNAKE_CASE_ : Optional[int] = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowercase_ , prev_sample_mean=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : Optional[torch.Generator] = None , lowercase_ : bool = True , ): '''simple docstring''' if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''') # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction SCREAMING_SNAKE_CASE_ : int = randn_tensor(sample.shape , layout=sample.layout , generator=lowercase_).to(sample.device) # compute step size from the model_output, the noise, and the snr SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1) , dim=-1).mean() SCREAMING_SNAKE_CASE_ : List[str] = torch.norm(noise.reshape(noise.shape[0] , -1) , dim=-1).mean() SCREAMING_SNAKE_CASE_ : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 SCREAMING_SNAKE_CASE_ : Union[str, Any] = step_size * torch.ones(sample.shape[0]).to(sample.device) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term SCREAMING_SNAKE_CASE_ : Dict = step_size.flatten() while len(step_size.shape) < len(sample.shape): SCREAMING_SNAKE_CASE_ : Dict = step_size.unsqueeze(-1) SCREAMING_SNAKE_CASE_ : str = sample + step_size * model_output SCREAMING_SNAKE_CASE_ : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowercase_) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = timesteps.to(original_samples.device) SCREAMING_SNAKE_CASE_ : Any = self.discrete_sigmas.to(original_samples.device)[timesteps] SCREAMING_SNAKE_CASE_ : Tuple = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowercase_) * sigmas[:, None, None, None] ) SCREAMING_SNAKE_CASE_ : List[Any] = noise + original_samples return noisy_samples def __len__( self : Union[str, Any]): '''simple docstring''' return self.config.num_train_timesteps
code_codestyle: 318
"""simple docstring""" from __future__ import annotations UpperCAmelCase_ : List[str] = list[list[int]] # assigning initial values to the grid UpperCAmelCase_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution UpperCAmelCase_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _A (__a , __a , __a , __a ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _A (__a ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _A (__a ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__a ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(__a , __a , __a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = digit if sudoku(__a ) is not None: return grid SCREAMING_SNAKE_CASE_ : Any = 0 return None def _A (__a ) -> None: """simple docstring""" for row in grid: for cell in row: print(__a , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") UpperCAmelCase_ : str = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
style_context_codestyle: 318
label: 1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Tuple = BlipImageProcessor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''') SCREAMING_SNAKE_CASE_ : str = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''') SCREAMING_SNAKE_CASE_ : List[str] = InstructBlipProcessor(lowercase_ , lowercase_ , lowercase_) processor.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : Optional[Any]): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).tokenizer def _SCREAMING_SNAKE_CASE ( self : List[str] , **lowercase_ : int): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).image_processor def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : Optional[int]): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).qformer_tokenizer def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_ : Tuple = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0) SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , lowercase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowercase_) self.assertIsInstance(processor.qformer_tokenizer , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = InstructBlipProcessor( tokenizer=lowercase_ , image_processor=lowercase_ , qformer_tokenizer=lowercase_) SCREAMING_SNAKE_CASE_ : str = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = 
image_processor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : str = processor(images=lowercase_ , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Any = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = InstructBlipProcessor( tokenizer=lowercase_ , image_processor=lowercase_ , qformer_tokenizer=lowercase_) SCREAMING_SNAKE_CASE_ : str = '''lower newer''' SCREAMING_SNAKE_CASE_ : Any = processor(text=lowercase_) SCREAMING_SNAKE_CASE_ : Any = tokenizer(lowercase_ , return_token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = qformer_tokenizer(lowercase_ , return_token_type_ids=lowercase_) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key]) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key]) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : str = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = InstructBlipProcessor( tokenizer=lowercase_ , image_processor=lowercase_ , qformer_tokenizer=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = '''lower newer''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_) self.assertListEqual( list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) # test if it raises when no input is passed with pytest.raises(lowercase_): processor() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Dict = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : List[str] = InstructBlipProcessor( tokenizer=lowercase_ , image_processor=lowercase_ , qformer_tokenizer=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : Any = processor.batch_decode(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.batch_decode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipProcessor( tokenizer=lowercase_ , image_processor=lowercase_ , qformer_tokenizer=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = '''lower newer''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(text=lowercase_ , images=lowercase_) self.assertListEqual( list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
code_codestyle: 318
"""simple docstring""" from itertools import permutations def _A (__a ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False SCREAMING_SNAKE_CASE_ : List[str] = [7, 11, 13, 17] for i, test in enumerate(__a ): if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def _A (__a = 10 ) -> int: """simple docstring""" return sum( int(''''''.join(map(__a , __a ) ) ) for num in permutations(range(__a ) ) if is_substring_divisible(__a ) ) if __name__ == "__main__": print(f'''{solution() = }''')
style_context_codestyle: 318
label: 1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Dict , lowercase_ : Dict , lowercase_ : Tuple=13 , lowercase_ : Any=7 , lowercase_ : List[Any]=True , lowercase_ : int=True , lowercase_ : Tuple=True , lowercase_ : int=99 , lowercase_ : Optional[Any]=32 , lowercase_ : Optional[Any]=5 , lowercase_ : Optional[Any]=4 , lowercase_ : int=37 , lowercase_ : Tuple="gelu" , lowercase_ : Any=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[Any]=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Any=2 , lowercase_ : List[str]=0.02 , lowercase_ : List[str]=3 , lowercase_ : int=4 , lowercase_ : Optional[Any]=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = parent SCREAMING_SNAKE_CASE_ : int = batch_size SCREAMING_SNAKE_CASE_ : Dict = seq_length SCREAMING_SNAKE_CASE_ : str = is_training SCREAMING_SNAKE_CASE_ : int = use_token_type_ids SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE_ : str = hidden_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE_ : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE_ : Any = hidden_act SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings SCREAMING_SNAKE_CASE_ : Tuple = type_vocab_size SCREAMING_SNAKE_CASE_ : Dict = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Dict = initializer_range SCREAMING_SNAKE_CASE_ : Optional[int] = num_labels SCREAMING_SNAKE_CASE_ : Any = num_choices SCREAMING_SNAKE_CASE_ : int = scope SCREAMING_SNAKE_CASE_ : str = self.vocab_size - 1 def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : str = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : Any = None SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Optional[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, 
choice_labels, ) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] , *lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = OpenAIGPTModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Any , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict , *lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = OpenAIGPTLMHeadModel(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[Any] , *lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = OpenAIGPTDoubleHeadsModel(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Dict = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : str , *lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.num_labels SCREAMING_SNAKE_CASE_ : Tuple = OpenAIGPTForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : str = config_and_inputs SCREAMING_SNAKE_CASE_ : Dict = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) __UpperCamelCase = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly __UpperCamelCase = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": 
OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Tuple , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[Any]): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Any , lowercase_ : str , lowercase_ : str=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": SCREAMING_SNAKE_CASE_ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ , ) SCREAMING_SNAKE_CASE_ : List[Any] = inputs_dict['''labels'''] SCREAMING_SNAKE_CASE_ : str = inputs_dict['''labels'''] SCREAMING_SNAKE_CASE_ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase_ , ) SCREAMING_SNAKE_CASE_ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = OpenAIGPTModelTester(self) SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self , config_class=lowercase_ , n_embd=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Tuple = OpenAIGPTModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''') model.to(lowercase_) SCREAMING_SNAKE_CASE_ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase_) # the president is SCREAMING_SNAKE_CASE_ : Any = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. 
" \n " i\'m sure he is, " said the SCREAMING_SNAKE_CASE_ : str = model.generate(lowercase_ , do_sample=lowercase_) self.assertListEqual(output_ids[0].tolist() , lowercase_)
code_codestyle: 318
"""simple docstring""" UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5 def _A (__a , __a , __a = g ) -> float: """simple docstring""" if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
style_context_codestyle: 318
label: 1
"""simple docstring""" from typing import Any import numpy as np def _A (__a ) -> bool: """simple docstring""" return np.array_equal(__a , matrix.conjugate().T ) def _A (__a , __a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = v.conjugate().T SCREAMING_SNAKE_CASE_ : Tuple = v_star.dot(__a ) assert isinstance(__a , np.ndarray ) return (v_star_dot.dot(__a )) / (v_star.dot(__a )) def _A () -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] ) SCREAMING_SNAKE_CASE_ : List[str] = np.array([[1], [2], [3]] ) assert is_hermitian(__a ), f'{a} is not hermitian.' print(rayleigh_quotient(__a , __a ) ) SCREAMING_SNAKE_CASE_ : Any = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__a ), f'{a} is not hermitian.' assert rayleigh_quotient(__a , __a ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
code_codestyle: 318
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _A (__a ) -> Union[str, Any]: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__a ) def _A (__a ) -> Any: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__a , id=__a )
style_context_codestyle: 318
label: 1
"""simple docstring""" from collections import defaultdict def _A (__a , __a ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip() SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip() # Remove whitespace SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(__a ) != len(__a ): return False # Default values for count should be 0 SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a ) # For each character in input strings, # increment count in the corresponding for i in range(len(__a ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ : Any = input("""Enter the first string """).strip() UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip() UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
code_codestyle: 318
"""simple docstring""" import argparse import os import re import packaging.version UpperCAmelCase_ : Any = """examples/""" UpperCAmelCase_ : Optional[int] = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCAmelCase_ : List[Any] = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCAmelCase_ : Optional[int] = """README.md""" def _A (__a , __a , __a ) -> int: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern] SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a ) SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a ) with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(__a ) def _A (__a ) -> int: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' ) def _A (__a , __a=False ) -> List[str]: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a , __a , __a ) if not patch: update_version_in_examples(__a ) def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?''' with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Tuple = f.readlines() # Find the start of the list. SCREAMING_SNAKE_CASE_ : Tuple = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Dict = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(__a ) def _A () -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: SCREAMING_SNAKE_CASE_ : Any = f.read() SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def _A (__a=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version elif patch: SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version print(f'Updating version to {version}.' ) global_version_update(__a , patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _A () -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_version() SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0' SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version # Check with the user we got that right. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version print(f'Updating version to {version}.' ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCAmelCase_ : int = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
style_context_codestyle: 318
label: 1
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def _A (__a , __a , __a , __a , ) -> list[float]: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = coefficient_matrix.shape SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = constant_matrix.shape if rowsa != colsa: SCREAMING_SNAKE_CASE_ : List[Any] = f'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}' raise ValueError(__a ) if colsa != 1: SCREAMING_SNAKE_CASE_ : int = f'Constant matrix must be nx1 but received {rowsa}x{colsa}' raise ValueError(__a ) if rowsa != rowsa: SCREAMING_SNAKE_CASE_ : Tuple = ( '''Coefficient and constant matrices dimensions must be nxn and nx1 but ''' f'received {rowsa}x{colsa} and {rowsa}x{colsa}' ) raise ValueError(__a ) if len(__a ) != rowsa: SCREAMING_SNAKE_CASE_ : str = ( '''Number of initial values must be equal to number of rows in coefficient ''' f'matrix but received {len(__a )} and {rowsa}' ) raise ValueError(__a ) if iterations <= 0: raise ValueError('''Iterations must be at least 1''' ) SCREAMING_SNAKE_CASE_ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = table.shape strictly_diagonally_dominant(__a ) # Iterates the whole matrix for given number of times for _ in range(__a ): SCREAMING_SNAKE_CASE_ : List[str] = [] for row in range(__a ): SCREAMING_SNAKE_CASE_ : Any = 0 for col in range(__a ): if col == row: SCREAMING_SNAKE_CASE_ : Any = table[row][col] elif col == cols - 1: SCREAMING_SNAKE_CASE_ : Dict = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] SCREAMING_SNAKE_CASE_ : List[str] = (temp + val) / denom new_val.append(__a ) SCREAMING_SNAKE_CASE_ : Tuple = new_val return [float(__a ) for i in new_val] def _A (__a ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = table.shape SCREAMING_SNAKE_CASE_ : Tuple = True for i in range(0 , __a ): SCREAMING_SNAKE_CASE_ : Dict = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 318
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _A (__a , __a , __a=1e-12 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T return jnp.matmul(__a , norm_emb_a.T ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config) SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype) SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param( '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,)) def __call__( self : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs SCREAMING_SNAKE_CASE_ : Tuple = 0.0 SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_) # Use a lower threshold if an image has any special care concept SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01 SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = CLIPConfig __UpperCamelCase = "clip_input" __UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ): '''simple docstring''' if input_shape is None: SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3) SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_) super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , 
lowercase_ : Tuple , lowercase_ : FrozenDict = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng} SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params'''] return random_params def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1)) return self.module.apply( {'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
style_context_codestyle: 318
label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ : Tuple = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Tuple = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 318
"""simple docstring""" from __future__ import annotations import queue class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = data SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None def _A () -> TreeNode: """simple docstring""" print('''\n********Press N to stop entering at any point of time********\n''' ) SCREAMING_SNAKE_CASE_ : List[Any] = input('''Enter the value of the root node: ''' ).strip().lower() SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE_ : Union[str, Any] = TreeNode(int(__a ) ) q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Optional[int] = q.get() SCREAMING_SNAKE_CASE_ : List[str] = f'Enter the left node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : Optional[int] = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : List[str] = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = left_node q.put(__a ) SCREAMING_SNAKE_CASE_ : str = f'Enter the right node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : str = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : Any = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : int = right_node q.put(__a ) raise def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Tuple = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : str = [] while not q.empty(): SCREAMING_SNAKE_CASE_ : List[str] = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__a ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Union[str, Any] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE_ : Tuple = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE_ : str = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Any = node while n or stack: while n: stack.append(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.left SCREAMING_SNAKE_CASE_ : Any = stack.pop() 
print(n.data , end=''',''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = [], [] SCREAMING_SNAKE_CASE_ : List[Any] = node stacka.append(__a ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE_ : List[str] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__a ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def _A (__a = "" , __a=50 , __a="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 ) return f'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase_ : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
style_context_codestyle: 318
label: 1
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _A () -> str: """simple docstring""" import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join SCREAMING_SNAKE_CASE_ : str = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , __a ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _A () -> Optional[Any]: """simple docstring""" assert _test_patching.open is open SCREAMING_SNAKE_CASE_ : Optional[int] = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , __a ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _A () -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , __a ): pass def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , __a ) is None with patch_submodule(_test_patching , '''len''' , __a ): assert _test_patching.len is mock assert _test_patching.len is len def _A () -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, 
Any] = '''__test_patch_submodule_start_and_stop_mock__''' SCREAMING_SNAKE_CASE_ : str = patch_submodule(_test_patching , '''open''' , __a ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _A () -> List[str]: """simple docstring""" from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join SCREAMING_SNAKE_CASE_ : Any = '''__test_patch_submodule_successive_join__''' SCREAMING_SNAKE_CASE_ : List[Any] = '''__test_patch_submodule_successive_dirname__''' SCREAMING_SNAKE_CASE_ : Any = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , __a ): with patch_submodule(_test_patching , '''os.rename''' , __a ): with patch_submodule(_test_patching , '''os.path.dirname''' , __a ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , __a ): with patch_submodule(_test_patching , '''os.path.join''' , __a ): with patch_submodule(_test_patching , '''os.path.dirname''' , __a ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , __a ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , __a ): pass
318
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx" def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Tuple = np.array( [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 
0.76_94_25_15]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array( [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions() SCREAMING_SNAKE_CASE_ : Optional[int] = False return options def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''') SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : int = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : List[str] = np.array( [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
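# The tests above all follow the same scheduler-swapping pattern: any diffusers
# scheduler can be rebuilt from another scheduler's config, so one ONNX
# checkpoint is exercised with PNDM, DPM-Solver, Euler, and Euler-Ancestral.
# A minimal sketch of that pattern (assumes `diffusers` and `onnxruntime` are
# installed; `from_pretrained` downloads the checkpoint used in the tests):
from diffusers import DPMSolverMultistepScheduler, OnnxStableDiffusionUpscalePipeline

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
# rebuild a different scheduler from the current scheduler's config and swap it in
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)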
318
1
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class lowerCAmelCase__ : '''simple docstring''' def __init__( self : str , lowercase_ : Optional[Any] , lowercase_ : int=13 , lowercase_ : Optional[Any]=10 , lowercase_ : int=3 , lowercase_ : str=2 , lowercase_ : Dict=2 , lowercase_ : Dict=2 , lowercase_ : Tuple=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]=32 , lowercase_ : str=5 , lowercase_ : Optional[int]=4 , lowercase_ : int=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Dict=10 , lowercase_ : Dict=0.02 , lowercase_ : List[Any]=0.9 , lowercase_ : List[Any]=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = parent SCREAMING_SNAKE_CASE_ : Dict = batch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size SCREAMING_SNAKE_CASE_ : str = num_channels SCREAMING_SNAKE_CASE_ : Tuple = patch_size SCREAMING_SNAKE_CASE_ : List[Any] = tubelet_size SCREAMING_SNAKE_CASE_ : List[str] = num_frames SCREAMING_SNAKE_CASE_ : List[Any] = is_training SCREAMING_SNAKE_CASE_ : List[Any] = use_labels SCREAMING_SNAKE_CASE_ : Any = hidden_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE_ : int = hidden_act SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Tuple = initializer_range SCREAMING_SNAKE_CASE_ : Tuple = mask_ratio SCREAMING_SNAKE_CASE_ : Dict = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame SCREAMING_SNAKE_CASE_ : str = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE_ : Optional[int] = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos SCREAMING_SNAKE_CASE_ : Optional[int] = int(mask_ratio * self.seq_length) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : str = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size 
, num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = VideoMAEModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = VideoMAEForPreTraining(lowercase_) model.to(lowercase_) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.ones((self.num_masks,)) SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) SCREAMING_SNAKE_CASE_ : List[str] = mask.expand(self.batch_size , -1).bool() SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , lowercase_) # model only returns predictions for masked patches SCREAMING_SNAKE_CASE_ : str = mask.sum().item() SCREAMING_SNAKE_CASE_ : Dict = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels)) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) __UpperCamelCase = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = VideoMAEModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(lowercase_) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch SCREAMING_SNAKE_CASE_ : Tuple = torch.ones((self.model_tester.num_masks,)) SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - 
mask.size(0))]) SCREAMING_SNAKE_CASE_ : Tuple = mask.expand(self.model_tester.batch_size , -1).bool() SCREAMING_SNAKE_CASE_ : List[str] = bool_masked_pos.to(lowercase_) if return_labels: if model_class in [ *get_values(lowercase_), ]: SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''') def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Dict = model_class(lowercase_) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) SCREAMING_SNAKE_CASE_ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Any = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : List[str] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : List[Any] = VideoMAEModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' if not self.has_attentions: pass else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : str = True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.seq_length - self.model_tester.num_masks SCREAMING_SNAKE_CASE_ : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) SCREAMING_SNAKE_CASE_ : Optional[int] = True SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : List[Any] = True SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_)) SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE_ : Any = True SCREAMING_SNAKE_CASE_ : Optional[int] = 
model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_)) SCREAMING_SNAKE_CASE_ : List[Any] = outputs.attentions self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) SCREAMING_SNAKE_CASE_ : str = len(lowercase_) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_)) self.assertEqual(out_len + 1 , len(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.attentions self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' def check_hidden_states_output(lowercase_ : Any , lowercase_ : Tuple , lowercase_ : int): SCREAMING_SNAKE_CASE_ : Dict = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_)) SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowercase_) , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.seq_length - self.model_tester.num_masks SCREAMING_SNAKE_CASE_ : str = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : List[Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ : Optional[int] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' pass def _A () -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) SCREAMING_SNAKE_CASE_ : Any = np.load(__a ) return list(__a ) @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''').to( lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE_ : str = prepare_video() 
SCREAMING_SNAKE_CASE_ : Dict = image_processor(lowercase_ , return_tensors='''pt''').to(lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : int = model(**lowercase_) # verify the logits SCREAMING_SNAKE_CASE_ : Dict = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21]).to(lowercase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4)) @slow def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''').to(lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_video() SCREAMING_SNAKE_CASE_ : List[str] = image_processor(lowercase_ , return_tensors='''pt''').to(lowercase_) # add boolean mask, indicating which patches to mask SCREAMING_SNAKE_CASE_ : Dict = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = model(**lowercase_) # verify the logits SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 1408, 1536]) SCREAMING_SNAKE_CASE_ : str = torch.tensor( [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=lowercase_) self.assertEqual(outputs.logits.shape , lowercase_) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase_ , atol=1e-4)) # verify the loss (`config.norm_pix_loss` = `True`) SCREAMING_SNAKE_CASE_ : Any = torch.tensor([0.51_42] , device=lowercase_) self.assertTrue(torch.allclose(outputs.loss , lowercase_ , atol=1e-4)) # verify the loss (`config.norm_pix_loss` = `False`) SCREAMING_SNAKE_CASE_ : Optional[int] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=lowercase_).to( lowercase_) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**lowercase_) SCREAMING_SNAKE_CASE_ : Any = torch.tensor(torch.tensor([0.64_69]) , device=lowercase_) self.assertTrue(torch.allclose(outputs.loss , lowercase_ , atol=1e-4))
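# Worked example of the token/mask arithmetic used by the VideoMAE model tester
# above, with its default hyperparameters (image_size=10, patch_size=2,
# num_frames=2, tubelet_size=2, mask_ratio=0.9, batch_size=13):
import torch

image_size, patch_size = 10, 2
num_frames, tubelet_size = 2, 2
mask_ratio, batch_size = 0.9, 13

num_patches_per_frame = (image_size // patch_size) ** 2            # 25
seq_length = (num_frames // tubelet_size) * num_patches_per_frame  # 25
num_masks = int(mask_ratio * seq_length)                           # 22

# one shared mask, repeated for every video in the batch, as in the tests
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(seq_length, num_masks, bool_masked_pos.shape)  # 25 22 torch.Size([13, 25])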
318
"""simple docstring""" from scipy.stats import pearsonr import datasets UpperCAmelCase_ : List[Any] = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ UpperCAmelCase_ : Optional[int] = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ UpperCAmelCase_ : Tuple = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False): '''simple docstring''' if return_pvalue: SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
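# The metric above is a thin wrapper around scipy, so calling scipy directly
# reproduces the documented example (Pearson r is symmetric in its arguments):
from scipy.stats import pearsonr

predictions = [10, 9, 2.5, 6, 4]
references = [1, 2, 3, 4, 5]

r, p_value = pearsonr(references, predictions)
print(round(r, 2), round(p_value, 2))  # -0.74 0.15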
318
1
"""simple docstring""" import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @parameterized.expand([(None,), ('''foo.json''',)]) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50) self.assertEqual(loaded_config.max_length , 20) self.assertEqual(loaded_config.max_time , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained('''gpt2''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = GenerationConfig.from_model_config(lowercase_) SCREAMING_SNAKE_CASE_ : str = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = GenerationConfig() SCREAMING_SNAKE_CASE_ : Tuple = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } SCREAMING_SNAKE_CASE_ : List[Any] = copy.deepcopy(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = generation_config.update(**lowercase_) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {'''foo''': '''bar'''}) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = GenerationConfig() SCREAMING_SNAKE_CASE_ : Optional[int] = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''') as tmp_dir: generation_config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained(lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''') SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig.from_model_config(lowercase_) assert not hasattr(lowercase_ , '''foo''') # no new kwargs should be initialized if from config def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) 
self.assertEqual(default_config.do_sample , lowercase_) self.assertEqual(default_config.num_beams , 1) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , lowercase_) self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = TOKEN HfFolder.save_token(lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-generation-config''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''') except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : List[str] = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''test-generation-config''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Any = GenerationConfig.from_pretrained(F'{USER}/test-generation-config') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : int = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=lowercase_ , use_auth_token=self._token) SCREAMING_SNAKE_CASE_ : Tuple = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
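# Minimal round-trip sketch of the save/load behavior tested above: persist a
# GenerationConfig, then reload it while overriding one field at load time
# (assumes `transformers` is installed):
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

print(loaded.temperature, loaded.do_sample, loaded.num_beams)  # 1.0 True 1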
318
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCAmelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = pad_token_id SCREAMING_SNAKE_CASE_ : Optional[int] = max_length SCREAMING_SNAKE_CASE_ : Dict = vocab SCREAMING_SNAKE_CASE_ : Dict = merges SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab() return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_) return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]): '''simple docstring''' return cls(**lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs( lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id) return {"attention_mask": attention_mask, "input_ids": input_ids}
318
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : Dict = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "megatron-bert" def __init__( self : Dict , lowercase_ : List[Any]=29056 , lowercase_ : Union[str, Any]=1024 , lowercase_ : Any=24 , lowercase_ : Dict=16 , lowercase_ : List[str]=4096 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Any=512 , lowercase_ : Tuple=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : Any=1e-12 , lowercase_ : Union[str, Any]=0 , lowercase_ : Dict="absolute" , lowercase_ : List[str]=True , **lowercase_ : Any , ): '''simple docstring''' super().__init__(pad_token_id=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : str = num_hidden_layers SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads SCREAMING_SNAKE_CASE_ : Dict = hidden_act SCREAMING_SNAKE_CASE_ : Any = intermediate_size SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE_ : Union[str, Any] = position_embedding_type SCREAMING_SNAKE_CASE_ : int = use_cache
318
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) SCREAMING_SNAKE_CASE_ : Dict = { '''do_resize''': True, '''size''': {'''height''': 224, '''width''': 224}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], '''do_convert_rgb''': True, } SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str): '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_slow.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_) SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_fast.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , 
tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , lowercase_) self.assertIsInstance(processor_fast.tokenizer , lowercase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , lowercase_) self.assertIsInstance(processor_fast.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''') SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , lowercase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values''']) # test if it raises when no input is passed with pytest.raises(lowercase_): processor() def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
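# Minimal sketch of the processor pattern exercised by the tests above: a
# single object routes text to a tokenizer, images to an image processor, and
# merges the two output dicts. This is a simplified stand-in, not the actual
# ChineseCLIPProcessor implementation.
class SimpleProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text, **kwargs))
        if images is not None:
            outputs.update(self.image_processor(images, **kwargs))
        return outputs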
318
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "rwkv" __UpperCamelCase = {"max_position_embeddings": "context_length"} def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE_ : Any = context_length SCREAMING_SNAKE_CASE_ : int = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every SCREAMING_SNAKE_CASE_ : Dict = use_cache SCREAMING_SNAKE_CASE_ : Dict = bos_token_id SCREAMING_SNAKE_CASE_ : Any = eos_token_id super().__init__( tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
318
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "rwkv" __UpperCamelCase = {"max_position_embeddings": "context_length"} def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE_ : Any = context_length SCREAMING_SNAKE_CASE_ : int = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every SCREAMING_SNAKE_CASE_ : Dict = use_cache SCREAMING_SNAKE_CASE_ : Dict = bos_token_id SCREAMING_SNAKE_CASE_ : Any = eos_token_id super().__init__( tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
318
1
"""simple docstring""" from __future__ import annotations def _A (__a , __a ) -> list[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = 0 SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(__a ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: SCREAMING_SNAKE_CASE_ : Dict = i + 1 else: SCREAMING_SNAKE_CASE_ : List[str] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
318
"""simple docstring""" UpperCAmelCase_ : Optional[int] = 8.3_1_4_4_5_9_8 def _A (__a , __a ) -> float: """simple docstring""" if temperature < 0: raise Exception('''Temperature cannot be less than 0 K''' ) if molar_mass <= 0: raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example UpperCAmelCase_ : str = 300 UpperCAmelCase_ : str = 28 UpperCAmelCase_ : Any = rms_speed_of_molecule(temperature, molar_mass) print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
318
1
"""simple docstring""" import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : Optional[Any]=0.01 , lowercase_ : List[str]=1000): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = p_stop SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length def __iter__( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = 0 SCREAMING_SNAKE_CASE_ : List[str] = False while not stop and count < self.max_length: yield count count += 1 SCREAMING_SNAKE_CASE_ : Any = random.random() < self.p_stop class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : List[str]=False , lowercase_ : Tuple=True): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ BatchSamplerShard(lowercase_ , 2 , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) for i in range(2) ] SCREAMING_SNAKE_CASE_ : Union[str, Any] = [list(lowercase_) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(lowercase_) for shard in batch_sampler_shards] , [len(lowercase_) for e in expected]) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24) , batch_size=3 , drop_last=lowercase_) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_) # Check the shards when the dataset is a round multiple of batch size but not total batch size. SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(21) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(22) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(20) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(20) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_) # Check the shards when the dataset is very small. SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(2) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(2) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(24) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_) SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24) , batch_size=4 , drop_last=lowercase_) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_) # Check the shards when the dataset is not a round multiple of batch size. SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(22) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(22) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(21) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_) SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_) # Check the shards when the dataset is very small. SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(2) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : str = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_) SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(2) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : int = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24) , batch_size=3 , drop_last=lowercase_) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) # Check the shards when the dataset is a round multiple of batch size but not total batch size. SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(21) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(21) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(22) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(22) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(20) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(20) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) # Check the shards when the dataset is very small. SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(2) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [[[0, 1]], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(2) , batch_size=3 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , even_batches=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(24) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(24) , batch_size=4 , drop_last=lowercase_) # Expected shouldn't change self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) # Check the shards when the dataset is not a round multiple of batch size. SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(22) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(22) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) # Check the shards when the dataset is not a round multiple of batch size or num_processes. SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(21) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) # Check the shards when the dataset is very small. 
SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(2) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [[[0, 1]], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(2) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = [[], []] self.check_batch_sampler_shards(lowercase_ , lowercase_ , split_batches=lowercase_ , even_batches=lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] SCREAMING_SNAKE_CASE_ : Optional[Any] = [BatchSamplerShard(lowercase_ , 2 , lowercase_ , even_batches=lowercase_) for i in range(2)] self.assertEqual(len(batch_sampler_shards[0]) , 3) self.assertEqual(len(batch_sampler_shards[1]) , 2) self.assertListEqual(list(batch_sampler_shards[0]) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]]) self.assertListEqual(list(batch_sampler_shards[1]) , [[3, 4], [9, 10, 11]]) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Dict=False , lowercase_ : Dict=2 , lowercase_ : List[Any]=False): '''simple docstring''' random.seed(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = list(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ IterableDatasetShard( lowercase_ , batch_size=lowercase_ , drop_last=lowercase_ , num_processes=lowercase_ , process_index=lowercase_ , split_batches=lowercase_ , ) for i in range(lowercase_) ] SCREAMING_SNAKE_CASE_ : Any = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(lowercase_) iterable_dataset_lists.append(list(lowercase_)) SCREAMING_SNAKE_CASE_ : Any = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size SCREAMING_SNAKE_CASE_ : int = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(lowercase_) , len(lowercase_)) self.assertTrue(len(lowercase_) % shard_batch_size == 0) SCREAMING_SNAKE_CASE_ : Dict = [] for idx in range(0 , len(lowercase_) , lowercase_): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(lowercase_) < len(lowercase_): reference += reference self.assertListEqual(lowercase_ , reference[: len(lowercase_)]) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = 42 SCREAMING_SNAKE_CASE_ : List[Any] = RandomIterableDataset() self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_) # Edge case with a very small dataset SCREAMING_SNAKE_CASE_ : List[str] = RandomIterableDataset(max_length=2) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_) self.check_iterable_dataset_shards(lowercase_ , lowercase_ , batch_size=4 , drop_last=lowercase_ , split_batches=lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(16) , batch_size=4 , drop_last=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = SkipBatchSampler(lowercase_ , 2) self.assertListEqual(list(lowercase_) , [[8, 9, 10, 11], [12, 13, 14, 15]]) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = SkipDataLoader(list(range(16)) , batch_size=4 , skip_batches=2) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]]) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(list(range(16)) , batch_size=4) SCREAMING_SNAKE_CASE_ : str = skip_first_batches(lowercase_ , num_batches=2) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]]) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoaderShard(list(range(16)) , batch_size=4) for idx, _ in enumerate(lowercase_): self.assertEqual(dataloader.end_of_dataloader , idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(lowercase_): self.assertEqual(dataloader.end_of_dataloader , idx == 3) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' Accelerator() SCREAMING_SNAKE_CASE_ : Dict = DataLoaderDispatcher(range(16) , batch_size=4) for idx, _ in enumerate(lowercase_): 
self.assertEqual(dataloader.end_of_dataloader , idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(lowercase_): self.assertEqual(dataloader.end_of_dataloader , idx == 3)
318
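The expected shard contents in the tests above follow a simple rule: batches are dealt round-robin to processes, and with drop_last=False and even_batches=True the index stream wraps back to the start until every process holds the same number of full batches. A minimal pure-Python sketch of that rule (not the accelerate implementation itself):

# Minimal sketch of the round-robin sharding rule the expectations above encode
# (even_batches=True, drop_last=False); not the accelerate implementation itself.
import math
from itertools import cycle, islice


def shard_batches(dataset_indices, batch_size, num_processes):
    n_batches = math.ceil(len(dataset_indices) / batch_size)
    n_batches = math.ceil(n_batches / num_processes) * num_processes
    # Cycle through the indices so partial / missing tail batches wrap to the start.
    stream = islice(cycle(dataset_indices), n_batches * batch_size)
    batches = [list(islice(stream, batch_size)) for _ in range(n_batches)]
    return [batches[i::num_processes] for i in range(num_processes)]


assert shard_batches(range(22), 3, 2) == [
    [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
    [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]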
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = dataset SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies def __iter__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip()) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = start_length SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) SCREAMING_SNAKE_CASE_ : Tuple = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(lowercase_) def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a ) # last string should be "" return "".join(string_list[:-2] ) def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__a ) ): with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1] SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a ) # each task is generated batch_size times SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes( __a , dim=1 , pad_index=tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy() SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy() for 
task, generated_tokens in zip(__a , __a ): gen_token_dict[task].append(__a ) SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) code_gens[task].append(remove_last_block(__a ) ) return code_gens def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing SCREAMING_SNAKE_CASE_ : str = '''false''' if args.num_workers is None: SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate SCREAMING_SNAKE_CASE_ : Tuple = Accelerator() set_seed(args.seed , device_specific=__a ) # Load model and tokenizer SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings SCREAMING_SNAKE_CASE_ : List[str] = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ), } # Load evaluation dataset and metric SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' ) SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' ) SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a ) # do not confuse args.batch_size, which is actually the num_return_sequences SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a ) SCREAMING_SNAKE_CASE_ : List[Any] = complete_code( __a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , ) if accelerator.is_main_process: SCREAMING_SNAKE_CASE_ : int = [] for task in tqdm(range(__a ) ): SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test'''] SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute( references=__a , predictions=__a , num_workers=args.num_workers ) print(f'Results: {pass_at_k}' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(__a , __a ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
318
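A hedged illustration of the EOF-based truncation in the script above: re.split with a capturing group keeps the separators, so dropping the last two pieces removes the final stop-word and everything generated after it.

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
completion = "    return a + b\nprint(add(1, 2))"
pieces = re.split("(%s)" % "|".join(EOF_STRINGS), completion)
# pieces == ['    return a + b', '\nprint', '(add(1, 2))']
assert "".join(pieces[:-2]) == "    return a + b"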
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = '''ZinengTang/tvlt-base''' SCREAMING_SNAKE_CASE_ : Dict = tempfile.mkdtemp() def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : List[Any]): '''simple docstring''' return TvltImageProcessor.from_pretrained(self.checkpoint , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : List[str]): '''simple docstring''' return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_feature_extractor() SCREAMING_SNAKE_CASE_ : int = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : str = TvltProcessor.from_pretrained(self.tmpdirname) self.assertIsInstance(processor.feature_extractor , lowercase_) self.assertIsInstance(processor.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_feature_extractor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = np.ones([12000]) SCREAMING_SNAKE_CASE_ : List[Any] = feature_extractor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : Any = processor(audio=lowercase_ , return_tensors='''np''') for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_feature_extractor() SCREAMING_SNAKE_CASE_ : Dict = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = np.ones([3, 224, 224]) SCREAMING_SNAKE_CASE_ : Dict = image_processor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : str = processor(images=lowercase_ , return_tensors='''np''') for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_feature_extractor() SCREAMING_SNAKE_CASE_ : str = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = np.ones([12000]) SCREAMING_SNAKE_CASE_ : Dict = np.ones([3, 224, 224]) SCREAMING_SNAKE_CASE_ : List[str] = processor(audio=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask''']) # test if it raises when no input is 
passed with pytest.raises(lowercase_): processor() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_feature_extractor() SCREAMING_SNAKE_CASE_ : Dict = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
318
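A hedged usage sketch mirroring the assertions above; it downloads the same "ZinengTang/tvlt-base" checkpoint the tests use.

import numpy as np
from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

processor = TvltProcessor(
    image_processor=TvltImageProcessor.from_pretrained("ZinengTang/tvlt-base"),
    feature_extractor=TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base"),
)
inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
assert sorted(inputs.keys()) == ["audio_mask", "audio_values", "pixel_mask", "pixel_values"]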
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["image_processor", "feature_extractor"] __UpperCamelCase = "TvltImageProcessor" __UpperCamelCase = "TvltFeatureExtractor" def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' super().__init__(image_processor=lowercase_ , feature_extractor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_extractor def __call__( self : Any , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : str=None , lowercase_ : int=False , lowercase_ : Union[str, Any]=False , *lowercase_ : List[Any] , **lowercase_ : List[str] , ): '''simple docstring''' if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''') SCREAMING_SNAKE_CASE_ : Any = None if images is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_) if images_mixed is not None: SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_) if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor( lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {} if audio is not None: output_dict.update(lowercase_) if images is not None: output_dict.update(lowercase_) if images_mixed_dict is not None: output_dict.update(lowercase_) return output_dict @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
318
1
"""simple docstring""" from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def _A (__a ) -> Dict: """simple docstring""" if not is_accelerate_available(): return method SCREAMING_SNAKE_CASE_ : Any = version.parse(accelerate.__version__ ).base_version if version.parse(__a ) < version.parse('''0.17.0''' ): return method def wrapper(self , *__a , **__a ): if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ): self._hf_hook.pre_forward(self ) return method(self , *__a , **__a ) return wrapper
318
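A hypothetical usage sketch for the decorator above: accelerate's offload hooks attach themselves to a module as `_hf_hook`, and the wrapper fires `pre_forward` before the decorated method runs (when accelerate >= 0.17.0 is installed).

class _ToyHook:
    def pre_forward(self, module):
        print("pre_forward fired for", type(module).__name__)


class ToyModel:
    @apply_forward_hook
    def decode(self, x):
        return x * 2


model = ToyModel()
model._hf_hook = _ToyHook()
assert model.decode(3) == 6  # prints the pre_forward message first when wrapped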
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "SpeechT5FeatureExtractor" __UpperCamelCase = "SpeechT5Tokenizer" def __init__( self : Any , lowercase_ : Dict , lowercase_ : Optional[Any]): '''simple docstring''' super().__init__(lowercase_ , lowercase_) def __call__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('''text''' , lowercase_) SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''text_target''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''audio_target''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''sampling_rate''' , lowercase_) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''') if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''') if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''') if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) elif text is not None: SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(lowercase_ , **lowercase_) else: SCREAMING_SNAKE_CASE_ : Any = None if audio_target is not None: SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = targets['''input_values'''] elif text_target is not None: SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = targets['''input_ids'''] else: SCREAMING_SNAKE_CASE_ : int = None if inputs is None: return targets if targets is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels SCREAMING_SNAKE_CASE_ : Optional[Any] = targets.get('''attention_mask''') if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE_ : Any = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''input_values''' , lowercase_) SCREAMING_SNAKE_CASE_ : int = kwargs.pop('''input_ids''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''labels''' , lowercase_) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''') if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''') if input_values is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_) elif input_ids is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.pad(lowercase_ , **lowercase_) else: SCREAMING_SNAKE_CASE_ : List[Any] = None if labels is not None: if "input_ids" in labels or (isinstance(lowercase_ , lowercase_) and "input_ids" in labels[0]): SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer.pad(lowercase_ , **lowercase_) 
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_ids'''] else: SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.feature_size SCREAMING_SNAKE_CASE_ : Optional[int] = self.feature_extractor.num_mel_bins SCREAMING_SNAKE_CASE_ : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : str = feature_size_hack SCREAMING_SNAKE_CASE_ : Dict = targets['''input_values'''] else: SCREAMING_SNAKE_CASE_ : List[Any] = None if inputs is None: return targets if targets is not None: SCREAMING_SNAKE_CASE_ : Dict = labels SCREAMING_SNAKE_CASE_ : List[str] = targets.get('''attention_mask''') if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Tuple): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowercase_ : Dict , **lowercase_ : List[Any]): '''simple docstring''' return self.tokenizer.decode(*lowercase_ , **lowercase_)
318
1
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = LayoutLMTokenizer __UpperCamelCase = LayoutLMTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE_ : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) def _SCREAMING_SNAKE_CASE ( self : Dict , **lowercase_ : List[str]): '''simple docstring''' return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = '''UNwant\u00E9d,running''' SCREAMING_SNAKE_CASE_ : Any = '''unwanted, running''' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.tokenizer_class(self.vocab_file) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize('''UNwant\u00E9d,running''') self.assertListEqual(lowercase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [7, 4, 5, 10, 8, 9]) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass
318
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _A (__a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = np.inf def set_batch_size(__a ) -> None: nonlocal batch_size if isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__a , __a ) and feature.dtype == "binary": SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__a , __a ) return None if batch_size is np.inf else batch_size class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Any , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Any = path_or_paths if isinstance(lowercase_ , lowercase_) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE_ : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = Parquet( cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE_ : Optional[Any] = None SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset( split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory) return dataset class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = dataset SCREAMING_SNAKE_CASE_ : Dict = path_or_buf SCREAMING_SNAKE_CASE_ : List[Any] = batch_size or get_writer_batch_size(dataset.features) SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , '''wb+''') as buffer: SCREAMING_SNAKE_CASE_ : 
Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs) else: SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs) return written def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_) for offset in logging.tqdm( range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): SCREAMING_SNAKE_CASE_ : List[Any] = query_table( table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowercase_) written += batch.nbytes writer.close() return written
318
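A hedged usage sketch through the public Dataset API, which routes writes through the parquet writer class above.

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
n_bytes = ds.to_parquet("tiny.parquet")  # returns the number of bytes written
reloaded = Dataset.from_parquet("tiny.parquet")
assert reloaded["text"] == ["a", "b", "c"]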
1
"""simple docstring""" from __future__ import annotations import time UpperCAmelCase_ : List[str] = list[tuple[int, int]] UpperCAmelCase_ : Tuple = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] UpperCAmelCase_ : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class lowerCAmelCase__ : '''simple docstring''' def __init__( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : Node | None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = pos_x SCREAMING_SNAKE_CASE_ : List[Any] = pos_y SCREAMING_SNAKE_CASE_ : Tuple = (pos_y, pos_x) SCREAMING_SNAKE_CASE_ : str = goal_x SCREAMING_SNAKE_CASE_ : str = goal_y SCREAMING_SNAKE_CASE_ : Tuple = parent class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : tuple[int, int] , lowercase_ : tuple[int, int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = Node(start[1] , start[0] , goal[1] , goal[0] , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = [self.start] SCREAMING_SNAKE_CASE_ : Tuple = False def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' while self.node_queue: SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.node_queue.pop(0) if current_node.pos == self.target.pos: SCREAMING_SNAKE_CASE_ : List[Any] = True return self.retrace_path(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_successors(lowercase_) for node in successors: self.node_queue.append(lowercase_) if not self.reached: return [self.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Node): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [] for action in delta: SCREAMING_SNAKE_CASE_ : List[Any] = parent.pos_x + action[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(lowercase_) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , lowercase_)) return successors def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Node | None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = node SCREAMING_SNAKE_CASE_ : Any = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) SCREAMING_SNAKE_CASE_ : Any = current_node.parent path.reverse() return path class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = BreadthFirstSearch(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = BreadthFirstSearch(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : str = False def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: SCREAMING_SNAKE_CASE_ : Dict = self.fwd_bfs.node_queue.pop(0) SCREAMING_SNAKE_CASE_ : List[Any] = self.bwd_bfs.node_queue.pop(0) if current_bwd_node.pos == current_fwd_node.pos: SCREAMING_SNAKE_CASE_ : Optional[Any] = True return self.retrace_bidirectional_path( lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = current_bwd_node SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_fwd_node SCREAMING_SNAKE_CASE_ : Tuple = { 
self.fwd_bfs: self.fwd_bfs.get_successors(lowercase_), self.bwd_bfs: self.bwd_bfs.get_successors(lowercase_), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(lowercase_) if not self.reached: return [self.fwd_bfs.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Node , lowercase_ : Node): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.fwd_bfs.retrace_path(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = self.bwd_bfs.retrace_path(lowercase_) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE_ : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() UpperCAmelCase_ : Optional[int] = (0, 0) UpperCAmelCase_ : List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) UpperCAmelCase_ : Optional[Any] = time.time() UpperCAmelCase_ : Union[str, Any] = BreadthFirstSearch(init, goal) UpperCAmelCase_ : Tuple = bfs.search() UpperCAmelCase_ : Dict = time.time() - start_bfs_time print("""Unidirectional BFS computation time : """, bfs_time) UpperCAmelCase_ : str = time.time() UpperCAmelCase_ : List[Any] = BidirectionalBreadthFirstSearch(init, goal) UpperCAmelCase_ : Dict = bd_bfs.search() UpperCAmelCase_ : Any = time.time() - start_bd_bfs_time print("""Bidirectional BFS computation time : """, bd_bfs_time)
318
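A minimal sketch of the core breadth-first loop above, using collections.deque for O(1) pops and adding a visited check (the implementation above re-enqueues already-seen cells).

from collections import deque


def bfs(grid, start, goal):
    """Shortest path on a 0/1 grid as a list of (y, x) cells, or [] if unreachable."""
    queue, parent = deque([start]), {start: None}
    while queue:
        pos = queue.popleft()
        if pos == goal:
            path = []
            while pos is not None:  # walk parents back to the start
                path.append(pos)
                pos = parent[pos]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = pos[0] + dy, pos[1] + dx
            if (
                0 <= ny < len(grid)
                and 0 <= nx < len(grid[0])
                and grid[ny][nx] == 0
                and (ny, nx) not in parent
            ):
                parent[(ny, nx)] = pos
                queue.append((ny, nx))
    return []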
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""] def _A (__a ) -> Dict: """simple docstring""" if "emb" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def _A (__a , __a ) -> Tuple[Dict, Dict]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() ) SCREAMING_SNAKE_CASE_ : int = {} for key in keys: SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a ) SCREAMING_SNAKE_CASE_ : int = rename_keys(__a ) if "in_proj_weight" in key: # split fused qkv proj SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :] SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :] SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: SCREAMING_SNAKE_CASE_ : int = val else: SCREAMING_SNAKE_CASE_ : Any = val return state_dict, enc_dec_proj_state_dict def _A (__a ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24 SCREAMING_SNAKE_CASE_ : Tuple = 24 SCREAMING_SNAKE_CASE_ : Optional[Any] = 16 elif checkpoint == "medium": SCREAMING_SNAKE_CASE_ : List[str] = 15_36 SCREAMING_SNAKE_CASE_ : Optional[int] = 48 SCREAMING_SNAKE_CASE_ : Optional[int] = 24 elif checkpoint == "large": SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48 SCREAMING_SNAKE_CASE_ : Optional[int] = 48 SCREAMING_SNAKE_CASE_ : int = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig( hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , ) return config @torch.no_grad() def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a ) SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict( __a , hidden_size=decoder_config.hidden_size ) SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel.from_pretrained('''t5-base''' ) SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__a ) if len(__a ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(__a ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__a ) # check we can do a forward pass SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits if logits.shape != (8, 1, 20_48): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' ) SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a ) # set the appropriate bos/pad token ids SCREAMING_SNAKE_CASE_ : str = 20_48 SCREAMING_SNAKE_CASE_ : List[Any] = 20_48 # set other default generation config params SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate ) SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0 if pytorch_dump_folder is not None: Path(__a ).mkdir(exist_ok=__a ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(__a ) processor.save_pretrained(__a ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(__a ) processor.push_to_hub(__a ) if __name__ == "__main__": UpperCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) UpperCAmelCase_ : Dict = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
318
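A hedged sketch of the fused-QKV split performed in the conversion above: fairseq stores q, k and v stacked along dim 0 of `in_proj_weight`, and the converter slices them apart.

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), in_proj_weight)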
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ : Dict = { """configuration_bigbird_pegasus""": [ """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BigBirdPegasusConfig""", """BigBirdPegasusOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : str = [ """BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""", """BigBirdPegasusForCausalLM""", """BigBirdPegasusForConditionalGeneration""", """BigBirdPegasusForQuestionAnswering""", """BigBirdPegasusForSequenceClassification""", """BigBirdPegasusModel""", """BigBirdPegasusPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
318
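A minimal sketch of the idea behind `_LazyModule` above, not its actual implementation: module-level `__getattr__` (PEP 562) defers the submodule import until an exported name is first accessed.

import importlib

_import_structure = {"configuration_bigbird_pegasus": ["BigBirdPegasusConfig"]}


def __getattr__(name):
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module("." + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")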
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def _A (__a ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b def _A (__a ) -> np.ndarray: """simple docstring""" return (gray > 1_27) & (gray <= 2_55) def _A (__a , __a ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE_ : Union[str, Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE_ : Optional[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 ) return output if __name__ == "__main__": # read original image UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path)) # kernel to be applied UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
318
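An optional cross-check sketch: on binary inputs with a symmetric structuring element, scipy's binary_dilation should agree with the manual sliding-window implementation above (assuming the centered padding reconstructed there).

import numpy as np
from scipy.ndimage import binary_dilation

image = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]], dtype=bool)
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
expected = binary_dilation(image, structure=cross.astype(bool))
assert np.array_equal(dilation(image, cross).astype(bool), expected)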
1
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''') SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''').to(lowercase_) SCREAMING_SNAKE_CASE_ : int = -1 SCREAMING_SNAKE_CASE_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_) SCREAMING_SNAKE_CASE_ : Any = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_) SCREAMING_SNAKE_CASE_ : str = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE_ : str = TextStreamer(lowercase_) model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE_ : Dict = cs.out[:-1] self.assertEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''') SCREAMING_SNAKE_CASE_ : int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''').to(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = -1 SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_) SCREAMING_SNAKE_CASE_ : str = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.decode(greedy_ids[0]) SCREAMING_SNAKE_CASE_ : Optional[Any] = TextIteratorStreamer(lowercase_) SCREAMING_SNAKE_CASE_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} SCREAMING_SNAKE_CASE_ : Tuple = Thread(target=model.generate , kwargs=lowercase_) thread.start() SCREAMING_SNAKE_CASE_ : Tuple = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''') SCREAMING_SNAKE_CASE_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''').to(lowercase_) SCREAMING_SNAKE_CASE_ : str = -1 SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_) SCREAMING_SNAKE_CASE_ : str = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE_ : int = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE_ : Dict = TextStreamer(lowercase_ , skip_prompt=lowercase_) model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE_ : List[Any] = cs.out[:-1] self.assertEqual(lowercase_ , lowercase_) def 
_SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained('''distilgpt2''') SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''').to(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = -1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.ones((1, 5) , device=lowercase_).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE_ : str = TextStreamer(lowercase_ , skip_special_tokens=lowercase_) model.generate(lowercase_ , max_new_tokens=1 , do_sample=lowercase_ , streamer=lowercase_) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE_ : List[Any] = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(lowercase_ , return_tensors='''pt''') self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''') SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''').to(lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = -1 SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = TextIteratorStreamer(lowercase_ , timeout=0.0_01) SCREAMING_SNAKE_CASE_ : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} SCREAMING_SNAKE_CASE_ : List[Any] = Thread(target=model.generate , kwargs=lowercase_) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowercase_): SCREAMING_SNAKE_CASE_ : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
318
"""simple docstring""" from collections import defaultdict def _A (__a , __a ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip() SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip() # Remove whitespace SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(__a ) != len(__a ): return False # Default values for count should be 0 SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a ) # For each character in input strings, # increment count in the corresponding for i in range(len(__a ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ : Any = input("""Enter the first string """).strip() UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip() UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
318
1
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _A (__a , __a , __a=1e-12 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T return jnp.matmul(__a , norm_emb_a.T ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config) SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype) SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param( '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,)) def __call__( self : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs SCREAMING_SNAKE_CASE_ : Tuple = 0.0 SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_) # Use a lower threshold if an image has any special care concept SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01 SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = CLIPConfig __UpperCamelCase = "clip_input" __UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ): '''simple docstring''' if input_shape is None: SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3) SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_) super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , 
lowercase_ : Tuple , lowercase_ : FrozenDict = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng} SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params'''] return random_params def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1)) return self.module.apply( {'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
318
"""simple docstring""" import argparse from collections import defaultdict import yaml UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml""" def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = defaultdict(__a ) for doc in model_doc: counts[doc["local"]] += 1 SCREAMING_SNAKE_CASE_ : List[Any] = [key for key, value in counts.items() if value > 1] SCREAMING_SNAKE_CASE_ : int = [] for duplicate_key in duplicates: SCREAMING_SNAKE_CASE_ : List[str] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(__a ) > 1: raise ValueError( f'{duplicate_key} is present several times in the documentation table of content at ' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(__a , key=lambda __a : s["title"].lower() ) def _A (__a=False ) -> Tuple: """simple docstring""" with open(__a , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = yaml.safe_load(f.read() ) # Get to the API doc SCREAMING_SNAKE_CASE_ : Any = 0 while content[api_idx]["title"] != "API": api_idx += 1 SCREAMING_SNAKE_CASE_ : str = content[api_idx]['''sections'''] # Then to the model doc SCREAMING_SNAKE_CASE_ : List[Any] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 SCREAMING_SNAKE_CASE_ : Optional[int] = api_doc[model_idx]['''sections'''] SCREAMING_SNAKE_CASE_ : str = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section] SCREAMING_SNAKE_CASE_ : Optional[Any] = False for idx, modality_doc in modalities_docs: SCREAMING_SNAKE_CASE_ : List[str] = modality_doc['''sections'''] SCREAMING_SNAKE_CASE_ : Union[str, Any] = clean_model_doc_toc(__a ) if old_modality_doc != new_modality_doc: SCREAMING_SNAKE_CASE_ : str = True if overwrite: SCREAMING_SNAKE_CASE_ : Optional[int] = new_modality_doc if diff: if overwrite: SCREAMING_SNAKE_CASE_ : List[Any] = model_doc SCREAMING_SNAKE_CASE_ : int = api_doc with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Tuple = parser.parse_args() check_model_doc(args.fix_and_overwrite)
318
1
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def _A (__a ) -> float: """simple docstring""" return np.dot(__a , __a ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : int , *, lowercase_ : float = np.inf , lowercase_ : str = "linear" , lowercase_ : float = 0.0 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = regularization SCREAMING_SNAKE_CASE_ : Tuple = gamma if kernel == "linear": SCREAMING_SNAKE_CASE_ : Dict = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''') if not isinstance(self.gamma , (float, int)): raise ValueError('''gamma must be float or int''') if not self.gamma > 0: raise ValueError('''gamma must be > 0''') SCREAMING_SNAKE_CASE_ : List[Any] = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: SCREAMING_SNAKE_CASE_ : Optional[Any] = F'Unknown kernel: {kernel}' raise ValueError(lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : ndarray , lowercase_ : ndarray): '''simple docstring''' return np.dot(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : ndarray , lowercase_ : ndarray): '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : list[ndarray] , lowercase_ : ndarray): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = observations SCREAMING_SNAKE_CASE_ : int = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((SCREAMING_SNAKE_CASE_) , ) : Optional[int] = np.shape(lowercase_) def to_minimize(lowercase_ : ndarray) -> float: SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 ((SCREAMING_SNAKE_CASE_) , ) : Optional[int] = np.shape(lowercase_) for i in range(lowercase_): for j in range(lowercase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = LinearConstraint(lowercase_ , 0 , 0) SCREAMING_SNAKE_CASE_ : List[str] = Bounds(0 , self.regularization) SCREAMING_SNAKE_CASE_ : Union[str, Any] = minimize( lowercase_ , np.ones(lowercase_) , bounds=lowercase_ , constraints=[ly_contraint]).x SCREAMING_SNAKE_CASE_ : Optional[Any] = l_star # calculating mean offset of separation plane to points SCREAMING_SNAKE_CASE_ : Dict = 0 for i in range(lowercase_): for j in range(lowercase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) SCREAMING_SNAKE_CASE_ : Optional[int] = s / n def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : ndarray): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , lowercase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
318
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
318
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "microsoft/speecht5_tts" __UpperCamelCase = ( "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the " "text to read (in English) and returns a waveform object containing the sound." ) __UpperCamelCase = "text_reader" __UpperCamelCase = SpeechTaProcessor __UpperCamelCase = SpeechTaForTextToSpeech __UpperCamelCase = SpeechTaHifiGan __UpperCamelCase = ["text"] __UpperCamelCase = ["audio"] def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' if self.post_processor is None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''microsoft/speecht5_hifigan''' super().setup() def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict=None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.pre_processor(text=lowercase_ , return_tensors='''pt''' , truncation=lowercase_) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''') SCREAMING_SNAKE_CASE_ : int = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''') SCREAMING_SNAKE_CASE_ : str = torch.tensor(embeddings_dataset[7305]['''xvector''']).unsqueeze(0) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any]): '''simple docstring''' with torch.no_grad(): return self.model.generate_speech(**lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[str]): '''simple docstring''' with torch.no_grad(): return self.post_processor(lowercase_).cpu().detach()
318
"""simple docstring""" from __future__ import annotations UpperCAmelCase_ : List[str] = list[list[int]] # assigning initial values to the grid UpperCAmelCase_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution UpperCAmelCase_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _A (__a , __a , __a , __a ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _A (__a ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _A (__a ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__a ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(__a , __a , __a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = digit if sudoku(__a ) is not None: return grid SCREAMING_SNAKE_CASE_ : Any = 0 return None def _A (__a ) -> None: """simple docstring""" for row in grid: for cell in row: print(__a , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") UpperCAmelCase_ : str = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
318
1
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = XLNetTokenizer __UpperCamelCase = XLNetTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE_ : Optional[int] = XLNetTokenizer(lowercase_ , keep_accents=lowercase_) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = '''<s>''' SCREAMING_SNAKE_CASE_ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<unk>''') self.assertEqual(vocab_keys[1] , '''<s>''') self.assertEqual(vocab_keys[-1] , '''<eod>''') self.assertEqual(len(lowercase_) , 1006) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = XLNetTokenizer(lowercase_ , keep_accents=lowercase_) SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize('''This is a test''') self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [285, 46, 10, 170, 382]) SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) SCREAMING_SNAKE_CASE_ : Any = tokenizer.convert_tokens_to_ids(lowercase_) self.assertListEqual(lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) SCREAMING_SNAKE_CASE_ : str = tokenizer.convert_ids_to_tokens(lowercase_) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = XLNetTokenizer(lowercase_ , do_lower_case=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') 
self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''▁he''', '''ll''', '''o''']) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = XLNetTokenizer(lowercase_ , do_lower_case=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = XLNetTokenizer.from_pretrained('''xlnet-base-cased''') SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = {'''input_ids''': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
318
"""simple docstring""" from itertools import permutations def _A (__a ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False SCREAMING_SNAKE_CASE_ : List[str] = [7, 11, 13, 17] for i, test in enumerate(__a ): if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def _A (__a = 10 ) -> int: """simple docstring""" return sum( int(''''''.join(map(__a , __a ) ) ) for num in permutations(range(__a ) ) if is_substring_divisible(__a ) ) if __name__ == "__main__": print(f'''{solution() = }''')
318
1
"""simple docstring""" def _A (__a , __a , __a ) -> float: """simple docstring""" if principal <= 0: raise Exception('''Principal borrowed must be > 0''' ) if rate_per_annum < 0: raise Exception('''Rate of interest must be >= 0''' ) if years_to_repay <= 0 or not isinstance(__a , __a ): raise Exception('''Years to repay must be an integer > 0''' ) # Yearly rate is divided by 12 to get monthly rate SCREAMING_SNAKE_CASE_ : Any = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly SCREAMING_SNAKE_CASE_ : Union[str, Any] = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
318
"""simple docstring""" UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5 def _A (__a , __a , __a = g ) -> float: """simple docstring""" if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
318
1
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def _A (__a , __a , __a = "x" , __a = 10**-10 , __a = 1 , ) -> complex: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = symbols(__a ) SCREAMING_SNAKE_CASE_ : Tuple = lambdify(__a , __a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = lambdify(__a , diff(__a , __a ) ) SCREAMING_SNAKE_CASE_ : Dict = starting_point while True: if diff_function(__a ) != 0: SCREAMING_SNAKE_CASE_ : int = prev_guess - multiplicity * func(__a ) / diff_function( __a ) else: raise ZeroDivisionError('''Could not find root''' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess SCREAMING_SNAKE_CASE_ : Optional[Any] = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial # Find fourth Root of 5 print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''') # Find value of e print( """The root of log(y) - 1 = 0 is """, f'''{newton_raphson('log(y) - 1', 2, variable='y')}''', ) # Exponential Roots print( """The root of exp(x) - 1 = 0 is""", f'''{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}''', ) # Find root of cos(x) print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
318
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _A (__a ) -> Union[str, Any]: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__a ) def _A (__a ) -> Any: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__a , id=__a )
318
1
"""simple docstring""" from queue import PriorityQueue from typing import Any import numpy as np def _A (__a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> float | int: """simple docstring""" for nxt, d in graph[v]: if nxt in visited_forward: continue SCREAMING_SNAKE_CASE_ : Tuple = cst_fwd.get(__a , np.inf ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = new_cost_f SCREAMING_SNAKE_CASE_ : List[Any] = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: SCREAMING_SNAKE_CASE_ : int = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def _A (__a , __a , __a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = -1 SCREAMING_SNAKE_CASE_ : int = set() SCREAMING_SNAKE_CASE_ : Tuple = set() SCREAMING_SNAKE_CASE_ : Tuple = {source: 0} SCREAMING_SNAKE_CASE_ : Optional[int] = {destination: 0} SCREAMING_SNAKE_CASE_ : Tuple = {source: None} SCREAMING_SNAKE_CASE_ : Tuple = {destination: None} SCREAMING_SNAKE_CASE_ : PriorityQueue[Any] = PriorityQueue() SCREAMING_SNAKE_CASE_ : PriorityQueue[Any] = PriorityQueue() SCREAMING_SNAKE_CASE_ : Optional[Any] = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = queue_forward.get() visited_forward.add(__a ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = queue_backward.get() visited_backward.add(__a ) SCREAMING_SNAKE_CASE_ : Tuple = pass_and_relaxation( __a , __a , __a , __a , __a , __a , __a , __a , __a , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = pass_and_relaxation( __a , __a , __a , __a , __a , __a , __a , __a , __a , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: SCREAMING_SNAKE_CASE_ : List[str] = shortest_distance return shortest_path_distance UpperCAmelCase_ : List[str] = { """B""": [["""C""", 1]], """C""": [["""D""", 1]], """D""": [["""F""", 1]], """E""": [["""B""", 1], ["""G""", 2]], """F""": [], """G""": [["""F""", 1]], } UpperCAmelCase_ : int = { """B""": [["""E""", 1]], """C""": [["""B""", 1]], """D""": [["""C""", 1]], """F""": [["""D""", 1], ["""G""", 1]], """E""": [[None, np.inf]], """G""": [["""E""", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
318
"""simple docstring""" import argparse import os import re import packaging.version UpperCAmelCase_ : Any = """examples/""" UpperCAmelCase_ : Optional[int] = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCAmelCase_ : List[Any] = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCAmelCase_ : Optional[int] = """README.md""" def _A (__a , __a , __a ) -> int: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern] SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a ) SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a ) with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(__a ) def _A (__a ) -> int: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' ) def _A (__a , __a=False ) -> List[str]: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a , __a , __a ) if not patch: update_version_in_examples(__a ) def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?''' with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Tuple = f.readlines() # Find the start of the list. SCREAMING_SNAKE_CASE_ : Tuple = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Dict = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(__a ) def _A () -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: SCREAMING_SNAKE_CASE_ : Any = f.read() SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def _A (__a=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version elif patch: SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version print(f'Updating version to {version}.' ) global_version_update(__a , patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _A () -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_version() SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0' SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version # Check with the user we got that right. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version print(f'Updating version to {version}.' ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCAmelCase_ : int = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
318
1
"""simple docstring""" import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def _A (__a , __a="shi-labs/oneformer_demo" ) -> Union[str, Any]: """simple docstring""" with open(hf_hub_download(__a , __a , repo_type='''dataset''' ) , '''r''' ) as f: SCREAMING_SNAKE_CASE_ : Optional[int] = json.load(__a ) SCREAMING_SNAKE_CASE_ : Dict = {} SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] SCREAMING_SNAKE_CASE_ : List[str] = [] for key, info in class_info.items(): SCREAMING_SNAKE_CASE_ : Optional[Any] = info['''name'''] class_names.append(info['''name'''] ) if info["isthing"]: thing_ids.append(int(__a ) ) SCREAMING_SNAKE_CASE_ : List[str] = thing_ids SCREAMING_SNAKE_CASE_ : List[str] = class_names return metadata class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Tuple , lowercase_ : Optional[int] , lowercase_ : str=7 , lowercase_ : Tuple=3 , lowercase_ : Optional[int]=30 , lowercase_ : List[Any]=400 , lowercase_ : Optional[Any]=None , lowercase_ : int=True , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=[0.5, 0.5, 0.5] , lowercase_ : int=[0.5, 0.5, 0.5] , lowercase_ : Tuple=10 , lowercase_ : Optional[int]=False , lowercase_ : Dict=255 , lowercase_ : List[str]="shi-labs/oneformer_demo" , lowercase_ : Tuple="ade20k_panoptic.json" , lowercase_ : Dict=10 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = parent SCREAMING_SNAKE_CASE_ : List[Any] = batch_size SCREAMING_SNAKE_CASE_ : str = num_channels SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution SCREAMING_SNAKE_CASE_ : int = max_resolution SCREAMING_SNAKE_CASE_ : Tuple = do_resize SCREAMING_SNAKE_CASE_ : int = {'''shortest_edge''': 32, '''longest_edge''': 1333} if size is None else size SCREAMING_SNAKE_CASE_ : Dict = do_normalize SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_mean SCREAMING_SNAKE_CASE_ : Any = image_std SCREAMING_SNAKE_CASE_ : Union[str, Any] = class_info_file SCREAMING_SNAKE_CASE_ : int = prepare_metadata(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : int = num_text SCREAMING_SNAKE_CASE_ : Union[str, Any] = repo_path # for the post_process_functions SCREAMING_SNAKE_CASE_ : Any = 2 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 10 SCREAMING_SNAKE_CASE_ : List[str] = 10 SCREAMING_SNAKE_CASE_ : Optional[int] = 3 SCREAMING_SNAKE_CASE_ : Tuple = 4 SCREAMING_SNAKE_CASE_ : int = num_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = do_reduce_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = ignore_index def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def _SCREAMING_SNAKE_CASE ( self : List[Any] , 
lowercase_ : Union[str, Any] , lowercase_ : List[str]=False): '''simple docstring''' if not batched: SCREAMING_SNAKE_CASE_ : int = image_inputs[0] if isinstance(lowercase_ , Image.Image): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = image.size else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = image.shape[1], image.shape[2] if w < h: SCREAMING_SNAKE_CASE_ : Any = int(self.size['''shortest_edge'''] * h / w) SCREAMING_SNAKE_CASE_ : Optional[int] = self.size['''shortest_edge'''] elif w > h: SCREAMING_SNAKE_CASE_ : List[Any] = self.size['''shortest_edge'''] SCREAMING_SNAKE_CASE_ : Dict = int(self.size['''shortest_edge'''] * w / h) else: SCREAMING_SNAKE_CASE_ : List[str] = self.size['''shortest_edge'''] SCREAMING_SNAKE_CASE_ : int = self.size['''shortest_edge'''] else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] for image in image_inputs: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) SCREAMING_SNAKE_CASE_ : Tuple = max(lowercase_ , key=lambda lowercase_: item[0])[0] SCREAMING_SNAKE_CASE_ : List[Any] = max(lowercase_ , key=lambda lowercase_: item[1])[1] return expected_height, expected_width def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)) , ) @require_torch @require_vision class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __UpperCamelCase = image_processing_class def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = OneFormerImageProcessorTester(self) @property def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(lowercase_ , '''image_mean''')) self.assertTrue(hasattr(lowercase_ , '''image_std''')) self.assertTrue(hasattr(lowercase_ , '''do_normalize''')) self.assertTrue(hasattr(lowercase_ , '''do_resize''')) self.assertTrue(hasattr(lowercase_ , '''size''')) self.assertTrue(hasattr(lowercase_ , '''ignore_index''')) self.assertTrue(hasattr(lowercase_ , '''class_info_file''')) self.assertTrue(hasattr(lowercase_ , '''num_text''')) self.assertTrue(hasattr(lowercase_ , '''repo_path''')) self.assertTrue(hasattr(lowercase_ , '''metadata''')) self.assertTrue(hasattr(lowercase_ , '''do_reduce_labels''')) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_) for image in image_inputs: self.assertIsInstance(lowercase_ , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ : Any = image_processor(image_inputs[0] , ['''semantic'''] , 
return_tensors='''pt''').pixel_values SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_tester.get_expected_values(lowercase_) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = image_processor( lowercase_ , ['''semantic'''] * len(lowercase_) , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ , numpify=lowercase_) for image in image_inputs: self.assertIsInstance(lowercase_ , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''').pixel_values SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processing_tester.get_expected_values(lowercase_) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = image_processor( lowercase_ , ['''semantic'''] * len(lowercase_) , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_ , torchify=lowercase_) for image in image_inputs: self.assertIsInstance(lowercase_ , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''').pixel_values SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_tester.get_expected_values(lowercase_) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_tester.get_expected_values(lowercase_ , batched=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = image_processor( lowercase_ , ['''semantic'''] * len(lowercase_) , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : List[str]=False , lowercase_ : Dict=False , lowercase_ : List[Any]="np"): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # prepare image and target 
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_tester.num_labels SCREAMING_SNAKE_CASE_ : List[Any] = None SCREAMING_SNAKE_CASE_ : List[str] = None SCREAMING_SNAKE_CASE_ : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowercase_) if with_segmentation_maps: SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_labels if is_instance_map: SCREAMING_SNAKE_CASE_ : Tuple = list(range(lowercase_)) * 2 SCREAMING_SNAKE_CASE_ : Optional[int] = dict(enumerate(lowercase_)) SCREAMING_SNAKE_CASE_ : Dict = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0])).astype(np.uinta) for img in image_inputs ] if segmentation_type == "pil": SCREAMING_SNAKE_CASE_ : int = [Image.fromarray(lowercase_) for annotation in annotations] SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor( lowercase_ , ['''semantic'''] * len(lowercase_) , lowercase_ , return_tensors='''pt''' , instance_id_to_semantic_id=lowercase_ , pad_and_return_pixel_mask=lowercase_ , ) return inputs def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' def common(lowercase_ : List[str]=False , lowercase_ : Dict=None): SCREAMING_SNAKE_CASE_ : str = self.comm_get_image_processor_inputs( with_segmentation_maps=lowercase_ , is_instance_map=lowercase_ , segmentation_type=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = inputs['''mask_labels'''] SCREAMING_SNAKE_CASE_ : int = inputs['''class_labels'''] SCREAMING_SNAKE_CASE_ : Union[str, Any] = inputs['''pixel_values'''] SCREAMING_SNAKE_CASE_ : Tuple = inputs['''text_inputs'''] # check the batch_size for mask_label, class_label, text_input in zip(lowercase_ , lowercase_ , lowercase_): self.assertEqual(mask_label.shape[0] , class_label.shape[0]) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:]) self.assertEqual(len(lowercase_) , self.image_processing_tester.num_text) common() common(is_instance_map=lowercase_) common(is_instance_map=lowercase_ , segmentation_type='''pil''') common(is_instance_map=lowercase_ , segmentation_type='''pil''') def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros((20, 50)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 SCREAMING_SNAKE_CASE_ : str = 1 SCREAMING_SNAKE_CASE_ : Dict = 1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = binary_mask_to_rle(lowercase_) self.assertEqual(len(lowercase_) , 4) self.assertEqual(rle[0] , 21) self.assertEqual(rle[1] , 45) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) SCREAMING_SNAKE_CASE_ : str = self.image_processing_tester.get_fake_oneformer_outputs() SCREAMING_SNAKE_CASE_ : Dict = fature_extractor.post_process_semantic_segmentation(lowercase_) self.assertEqual(len(lowercase_) , self.image_processing_tester.batch_size) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size)] SCREAMING_SNAKE_CASE_ : str = fature_extractor.post_process_semantic_segmentation(lowercase_ , target_sizes=lowercase_) self.assertEqual(segmentation[0].shape , 
target_sizes[0]) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) SCREAMING_SNAKE_CASE_ : Any = self.image_processing_tester.get_fake_oneformer_outputs() SCREAMING_SNAKE_CASE_ : str = image_processor.post_process_instance_segmentation(lowercase_ , threshold=0) self.assertTrue(len(lowercase_) == self.image_processing_tester.batch_size) for el in segmentation: self.assertTrue('''segmentation''' in el) self.assertTrue('''segments_info''' in el) self.assertEqual(type(el['''segments_info''']) , lowercase_) self.assertEqual( el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() SCREAMING_SNAKE_CASE_ : Tuple = image_processor.post_process_panoptic_segmentation(lowercase_ , threshold=0) self.assertTrue(len(lowercase_) == self.image_processing_tester.batch_size) for el in segmentation: self.assertTrue('''segmentation''' in el) self.assertTrue('''segments_info''' in el) self.assertEqual(type(el['''segments_info''']) , lowercase_) self.assertEqual( el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width))
318
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _A (__a , __a , __a=1e-12 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T return jnp.matmul(__a , norm_emb_a.T ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config) SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype) SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param( '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,)) def __call__( self : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs SCREAMING_SNAKE_CASE_ : Tuple = 0.0 SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_) # Use a lower threshold if an image has any special care concept SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01 SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = CLIPConfig __UpperCamelCase = "clip_input" __UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ): '''simple docstring''' if input_shape is None: SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3) SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_) super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , 
lowercase_ : Tuple , lowercase_ : FrozenDict = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng} SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params'''] return random_params def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1)) return self.module.apply( {'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
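# Illustrative sanity check (added; not part of the original file): with toy
# 2-D embeddings, jax_cosine_distance returns the pairwise cosine-similarity
# matrix that the thresholding logic above operates on.
toy_image_embeds = jnp.array([[1.0, 0.0], [0.0, 2.0]])
toy_concept_embeds = jnp.array([[2.0, 0.0]])
# Row i, column j holds cos(toy_image_embeds[i], toy_concept_embeds[j]) -> [[1.0], [0.0]]
print(jax_cosine_distance(toy_image_embeds, toy_concept_embeds))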
318
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase_ : Optional[int] = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = [ """VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMAEForPreTraining""", """ViTMAELayer""", """ViTMAEModel""", """ViTMAEPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Dict = [ """TFViTMAEForPreTraining""", """TFViTMAEModel""", """TFViTMAEPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys UpperCAmelCase_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
318
"""simple docstring""" from __future__ import annotations import queue class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = data SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None def _A () -> TreeNode: """simple docstring""" print('''\n********Press N to stop entering at any point of time********\n''' ) SCREAMING_SNAKE_CASE_ : List[Any] = input('''Enter the value of the root node: ''' ).strip().lower() SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE_ : Union[str, Any] = TreeNode(int(__a ) ) q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Optional[int] = q.get() SCREAMING_SNAKE_CASE_ : List[str] = f'Enter the left node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : Optional[int] = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : List[str] = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = left_node q.put(__a ) SCREAMING_SNAKE_CASE_ : str = f'Enter the right node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : str = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : Any = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : int = right_node q.put(__a ) raise def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Tuple = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : str = [] while not q.empty(): SCREAMING_SNAKE_CASE_ : List[str] = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__a ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Union[str, Any] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE_ : Tuple = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE_ : str = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Any = node while n or stack: while n: stack.append(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.left SCREAMING_SNAKE_CASE_ : Any = stack.pop() 
print(n.data , end=''',''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = [], [] SCREAMING_SNAKE_CASE_ : List[Any] = node stacka.append(__a ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE_ : List[str] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__a ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def _A (__a = "" , __a=50 , __a="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 ) return f'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase_ : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
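# Non-interactive example (added for illustration; the demo above requires
# manual input): build a three-node tree by hand and traverse it directly.
example_root = TreeNode(1)
example_root.left, example_root.right = TreeNode(2), TreeNode(3)
pre_order(example_root)  # prints: 1,2,3,
print()
in_order(example_root)  # prints: 2,1,3,
print()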
318
1
"""simple docstring""" from scipy.stats import pearsonr import datasets UpperCAmelCase_ : List[Any] = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ UpperCAmelCase_ : Optional[int] = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ UpperCAmelCase_ : Tuple = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False): '''simple docstring''' if return_pvalue: SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
318
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx" def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Tuple = np.array( [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 
0.76_94_25_15]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array( [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions() SCREAMING_SNAKE_CASE_ : Optional[int] = False return options def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''') SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : int = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : List[str] = np.array( [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
318
1
"""simple docstring""" def _A (__a = 10_00 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = 2**power SCREAMING_SNAKE_CASE_ : List[str] = str(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] = 0 for i in list_num: sum_of_num += int(_UpperCAmelCase ) return sum_of_num if __name__ == "__main__": UpperCAmelCase_ : List[Any] = int(input("""Enter the power of 2: """).strip()) print("""2 ^ """, power, """ = """, 2**power) UpperCAmelCase_ : Dict = solution(power) print("""Sum of the digits is: """, result)
350
"""simple docstring""" from scipy.stats import pearsonr import datasets UpperCAmelCase_ : List[Any] = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ UpperCAmelCase_ : Optional[int] = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ UpperCAmelCase_ : Tuple = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False): '''simple docstring''' if return_pvalue: SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
318
0
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _A (__a = "isbn/0140328726" ) -> dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: SCREAMING_SNAKE_CASE_ : Optional[int] = f'{olid} is not a valid Open Library olid' raise ValueError(lowercase__ ) return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json() def _A (__a ) -> dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = { """title""": """Title""", """publish_date""": """Publish date""", """authors""": """Authors""", """number_of_pages""": """Number of pages:""", """first_sentence""": """First sentence""", """isbn_10""": """ISBN (10)""", """isbn_13""": """ISBN (13)""", } SCREAMING_SNAKE_CASE_ : Any = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} SCREAMING_SNAKE_CASE_ : int = [ get_openlibrary_data(author['''key'''] )["""name"""] for author in data["""Authors"""] ] SCREAMING_SNAKE_CASE_ : Union[str, Any] = data["""First sentence"""]["""value"""] for key, value in data.items(): if isinstance(lowercase__ , lowercase__ ): SCREAMING_SNAKE_CASE_ : int = """, """.join(lowercase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: UpperCAmelCase_ : Optional[int] = input("""\nEnter the ISBN code to search (or \'quit\' to stop): """).strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(f'''\nSearching Open Library for ISBN: {isbn}...\n''') try: UpperCAmelCase_ : int = summarize_book(get_openlibrary_data(f'''isbn/{isbn}''')) print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'''Sorry, there are no results for ISBN: {isbn}.''')
351
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCAmelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = pad_token_id SCREAMING_SNAKE_CASE_ : Optional[int] = max_length SCREAMING_SNAKE_CASE_ : Dict = vocab SCREAMING_SNAKE_CASE_ : Dict = merges SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab() return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_) return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]): '''simple docstring''' return cls(**lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs( lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id) return {"attention_mask": attention_mask, "input_ids": input_ids}
318
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase_ : List[str] = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __UpperCamelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __UpperCamelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __UpperCamelCase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = ZeroShotClassificationPipeline( model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , candidate_labels=['''polics''', '''health''']) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''') self.assertEqual(_UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase), '''labels''': [ANY(_UpperCAmelCase)], '''scores''': [ANY(_UpperCAmelCase)]}) # No kwarg SCREAMING_SNAKE_CASE_ : int = classifier('''Who are you voting for in 2020?''' , ['''politics''']) self.assertEqual(_UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase), '''labels''': [ANY(_UpperCAmelCase)], '''scores''': [ANY(_UpperCAmelCase)]}) SCREAMING_SNAKE_CASE_ : Dict = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''']) self.assertEqual(_UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase), '''labels''': [ANY(_UpperCAmelCase)], '''scores''': [ANY(_UpperCAmelCase)]}) SCREAMING_SNAKE_CASE_ : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''') self.assertEqual( _UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase), '''labels''': [ANY(_UpperCAmelCase), ANY(_UpperCAmelCase)], '''scores''': [ANY(_UpperCAmelCase), ANY(_UpperCAmelCase)]}) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''])) , 1.0) SCREAMING_SNAKE_CASE_ : Optional[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''']) self.assertEqual( _UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase), '''labels''': [ANY(_UpperCAmelCase), ANY(_UpperCAmelCase)], '''scores''': [ANY(_UpperCAmelCase), ANY(_UpperCAmelCase)]}) self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''])) , 1.0) SCREAMING_SNAKE_CASE_ : Tuple = classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''') self.assertEqual(_UpperCAmelCase , {'''sequence''': ANY(_UpperCAmelCase), '''labels''': [ANY(_UpperCAmelCase)], '''scores''': [ANY(_UpperCAmelCase)]}) # https://github.com/huggingface/transformers/issues/13846 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
classifier(['''I am happy'''] , ['''positive''', '''negative''']) self.assertEqual( _UpperCAmelCase , [ {'''sequence''': ANY(_UpperCAmelCase), '''labels''': [ANY(_UpperCAmelCase), ANY(_UpperCAmelCase)], '''scores''': [ANY(_UpperCAmelCase), ANY(_UpperCAmelCase)]} for i in range(1) ] , ) SCREAMING_SNAKE_CASE_ : Any = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative''']) self.assertEqual( _UpperCAmelCase , [ {'''sequence''': ANY(_UpperCAmelCase), '''labels''': [ANY(_UpperCAmelCase), ANY(_UpperCAmelCase)], '''scores''': [ANY(_UpperCAmelCase), ANY(_UpperCAmelCase)]} for i in range(2) ] , ) with self.assertRaises(_UpperCAmelCase): classifier('''''' , candidate_labels='''politics''') with self.assertRaises(_UpperCAmelCase): classifier(_UpperCAmelCase , candidate_labels='''politics''') with self.assertRaises(_UpperCAmelCase): classifier('''Who are you voting for in 2020?''' , candidate_labels='''''') with self.assertRaises(_UpperCAmelCase): classifier('''Who are you voting for in 2020?''' , candidate_labels=_UpperCAmelCase) with self.assertRaises(_UpperCAmelCase): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , ) with self.assertRaises(_UpperCAmelCase): classifier( '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_UpperCAmelCase , ) self.run_entailment_id(_UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = zero_shot_classifier.model.config SCREAMING_SNAKE_CASE_ : Optional[Any] = config.labelaid SCREAMING_SNAKE_CASE_ : Dict = zero_shot_classifier.entailment_id SCREAMING_SNAKE_CASE_ : Optional[int] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0) SCREAMING_SNAKE_CASE_ : Optional[int] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2) SCREAMING_SNAKE_CASE_ : List[str] = original_labelaid self.assertEqual(_UpperCAmelCase , zero_shot_classifier.entailment_id) @require_torch def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. 
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( '''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science''']) @require_torch def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , ) SCREAMING_SNAKE_CASE_ : List[Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science''']) self.assertEqual( nested_simplify(_UpperCAmelCase) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.3_33, 0.3_33, 0.3_33], } , ) @require_tf def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = pipeline( '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , ) SCREAMING_SNAKE_CASE_ : List[Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science''']) self.assertEqual( nested_simplify(_UpperCAmelCase) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''science''', '''public health''', '''politics'''], '''scores''': [0.3_33, 0.3_33, 0.3_33], } , ) @slow @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''') SCREAMING_SNAKE_CASE_ : Optional[int] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science''']) self.assertEqual( nested_simplify(_UpperCAmelCase) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.9_76, 0.0_15, 0.0_09], } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. 
We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_UpperCAmelCase , ) self.assertEqual( nested_simplify(_UpperCAmelCase) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , ) @slow @require_tf def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''') SCREAMING_SNAKE_CASE_ : List[Any] = zero_shot_classifier( '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science''']) self.assertEqual( nested_simplify(_UpperCAmelCase) , { '''sequence''': '''Who are you voting for in 2020?''', '''labels''': ['''politics''', '''public health''', '''science'''], '''scores''': [0.9_76, 0.0_15, 0.0_09], } , ) SCREAMING_SNAKE_CASE_ : Tuple = zero_shot_classifier( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks''' ''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder''' ''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based''' ''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two''' ''' machine translation tasks show these models to be superior in quality while being more parallelizable''' ''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014''' ''' English-to-German translation task, improving over the existing best results, including ensembles by''' ''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new''' ''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small''' ''' fraction of the training costs of the best models from the literature. 
We show that the Transformer''' ''' generalizes well to other tasks by applying it successfully to English constituency parsing both with''' ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_UpperCAmelCase , ) self.assertEqual( nested_simplify(_UpperCAmelCase) , { '''sequence''': ( '''The dominant sequence transduction models are based on complex recurrent or convolutional neural''' ''' networks in an encoder-decoder configuration. The best performing models also connect the''' ''' encoder and decoder through an attention mechanism. We propose a new simple network''' ''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence''' ''' and convolutions entirely. Experiments on two machine translation tasks show these models to be''' ''' superior in quality while being more parallelizable and requiring significantly less time to''' ''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,''' ''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014''' ''' English-to-French translation task, our model establishes a new single-model state-of-the-art''' ''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training''' ''' costs of the best models from the literature. We show that the Transformer generalizes well to''' ''' other tasks by applying it successfully to English constituency parsing both with large and''' ''' limited training data.''' ), '''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''], '''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , )
352
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) SCREAMING_SNAKE_CASE_ : Dict = { '''do_resize''': True, '''size''': {'''height''': 224, '''width''': 224}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], '''do_convert_rgb''': True, } SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str): '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_slow.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_) SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_fast.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , 
tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , lowercase_) self.assertIsInstance(processor_fast.tokenizer , lowercase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , lowercase_) self.assertIsInstance(processor_fast.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''') SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , lowercase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values''']) # test if it raises when no input is passed with pytest.raises(lowercase_): processor() def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
318
0
"""simple docstring""" def _A (__a ) -> Optional[Any]: """simple docstring""" return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def _A (__a ) -> list[tuple[int, int]]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : Tuple = len(__lowerCAmelCase ) # No of vertices in graph SCREAMING_SNAKE_CASE_ : Any = [0] * n SCREAMING_SNAKE_CASE_ : List[str] = [False] * n def dfs(__a , __a , __a , __a ): SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , id_ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(low[at] , low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge SCREAMING_SNAKE_CASE_ : int = min(low[at] , low[to] ) SCREAMING_SNAKE_CASE_ : int = [] for i in range(__lowerCAmelCase ): if not visited[i]: dfs(__lowerCAmelCase , -1 , __lowerCAmelCase , id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
353
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "rwkv" __UpperCamelCase = {"max_position_embeddings": "context_length"} def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE_ : Any = context_length SCREAMING_SNAKE_CASE_ : int = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every SCREAMING_SNAKE_CASE_ : Dict = use_cache SCREAMING_SNAKE_CASE_ : Dict = bos_token_id SCREAMING_SNAKE_CASE_ : Any = eos_token_id super().__init__( tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
318
0
"""simple docstring""" import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py UpperCAmelCase_ = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS) UpperCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` UpperCAmelCase_ = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") UpperCAmelCase_ = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def _A (__a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = None # source code of `config_class` SCREAMING_SNAKE_CASE_ : Tuple = inspect.getsource(lowerCamelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = _re_checkpoint.findall(lowerCamelCase__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('''/''' ): SCREAMING_SNAKE_CASE_ : Tuple = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link SCREAMING_SNAKE_CASE_ : Union[str, Any] = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: SCREAMING_SNAKE_CASE_ : List[str] = ckpt_name break return checkpoint def _A () -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue SCREAMING_SNAKE_CASE_ : Optional[int] = get_checkpoint_from_config_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: SCREAMING_SNAKE_CASE_ : str = '''\n'''.join(sorted(lowerCamelCase__ ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
354
"""simple docstring""" UpperCAmelCase_ : Optional[int] = 8.3_1_4_4_5_9_8 def _A (__a , __a ) -> float: """simple docstring""" if temperature < 0: raise Exception('''Temperature cannot be less than 0 K''' ) if molar_mass <= 0: raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example UpperCAmelCase_ : str = 300 UpperCAmelCase_ : str = 28 UpperCAmelCase_ : Any = rms_speed_of_molecule(temperature, molar_mass) print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
318
0
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer UpperCAmelCase_ : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase__ ): '''simple docstring''' __UpperCamelCase = '''AutoTokenizer''' __UpperCamelCase = ['''tokenizer'''] __UpperCamelCase = { '''semantic_prompt''': 1, '''coarse_prompt''': 2, '''fine_prompt''': 2, } def __init__( self : Dict , lowercase_ : Any , lowercase_ : Optional[int]=None): '''simple docstring''' super().__init__(_a) SCREAMING_SNAKE_CASE_ : Dict = speaker_embeddings @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple , lowercase_ : Any , lowercase_ : Dict="speaker_embeddings_path.json" , **lowercase_ : Any): '''simple docstring''' if speaker_embeddings_dict_path is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = get_file_from_repo( _a , _a , subfolder=kwargs.pop('''subfolder''' , _a) , cache_dir=kwargs.pop('''cache_dir''' , _a) , force_download=kwargs.pop('''force_download''' , _a) , proxies=kwargs.pop('''proxies''' , _a) , resume_download=kwargs.pop('''resume_download''' , _a) , local_files_only=kwargs.pop('''local_files_only''' , _a) , use_auth_token=kwargs.pop('''use_auth_token''' , _a) , revision=kwargs.pop('''revision''' , _a) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(_a , _a)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.') SCREAMING_SNAKE_CASE_ : Tuple = None else: with open(_a) as speaker_embeddings_json: SCREAMING_SNAKE_CASE_ : Tuple = json.load(_a) else: SCREAMING_SNAKE_CASE_ : Any = None SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(_a , **_a) return cls(tokenizer=_a , speaker_embeddings=_a) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Tuple , lowercase_ : List[str]="speaker_embeddings_path.json" , lowercase_ : Tuple="speaker_embeddings" , lowercase_ : bool = False , **lowercase_ : List[Any] , ): '''simple docstring''' if self.speaker_embeddings is not None: os.makedirs(os.path.join(_a , _a , '''v2''') , exist_ok=_a) SCREAMING_SNAKE_CASE_ : Optional[int] = {} SCREAMING_SNAKE_CASE_ : int = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": SCREAMING_SNAKE_CASE_ : Tuple = self._load_voice_preset(_a) SCREAMING_SNAKE_CASE_ : List[Any] = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , _a , F'{prompt_key}_{key}') , voice_preset[key] , allow_pickle=_a , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_a , F'{prompt_key}_{key}.npy') SCREAMING_SNAKE_CASE_ : Optional[int] = tmp_dict with open(os.path.join(_a , _a) , '''w''') as fp: json.dump(_a , _a) super().save_pretrained(_a , _a , **_a) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : str = None , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.speaker_embeddings[voice_preset] SCREAMING_SNAKE_CASE_ : int = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].') SCREAMING_SNAKE_CASE_ : Optional[Any] = get_file_from_repo( 
self.speaker_embeddings.get('''repo_or_path''' , '''/''') , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _a) , cache_dir=kwargs.pop('''cache_dir''' , _a) , force_download=kwargs.pop('''force_download''' , _a) , proxies=kwargs.pop('''proxies''' , _a) , resume_download=kwargs.pop('''resume_download''' , _a) , local_files_only=kwargs.pop('''local_files_only''' , _a) , use_auth_token=kwargs.pop('''use_auth_token''' , _a) , revision=kwargs.pop('''revision''' , _a) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/") , voice_preset_paths[key])}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.') SCREAMING_SNAKE_CASE_ : int = np.load(_a) return voice_preset_dict def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[dict] = None): '''simple docstring''' for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.') if not isinstance(voice_preset[key] , np.ndarray): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.') if len(voice_preset[key].shape) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.') def __call__( self : int , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]="pt" , lowercase_ : List[Any]=256 , lowercase_ : List[Any]=False , lowercase_ : str=True , lowercase_ : Optional[Any]=False , **lowercase_ : str , ): '''simple docstring''' if voice_preset is not None and not isinstance(_a , _a): if ( isinstance(_a , _a) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): SCREAMING_SNAKE_CASE_ : Tuple = self._load_voice_preset(_a) else: if isinstance(_a , _a) and not voice_preset.endswith('''.npz'''): SCREAMING_SNAKE_CASE_ : int = voice_preset + '.npz' SCREAMING_SNAKE_CASE_ : Dict = np.load(_a) if voice_preset is not None: self._validate_voice_preset_dict(_a , **_a) SCREAMING_SNAKE_CASE_ : List[str] = BatchFeature(data=_a , tensor_type=_a) SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer( _a , return_tensors=_a , padding='''max_length''' , max_length=_a , return_attention_mask=_a , return_token_type_ids=_a , add_special_tokens=_a , **_a , ) if voice_preset is not None: SCREAMING_SNAKE_CASE_ : Any = voice_preset return encoded_text
355
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = dataset SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies def __iter__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip()) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = start_length SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) SCREAMING_SNAKE_CASE_ : Tuple = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(lowercase_) def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a ) # last string should be "" return "".join(string_list[:-2] ) def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__a ) ): with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1] SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a ) # each task is generated batch_size times SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes( __a , dim=1 , pad_index=tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy() SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy() for 
task, generated_tokens in zip(__a , __a ): gen_token_dict[task].append(__a ) SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) code_gens[task].append(remove_last_block(__a ) ) return code_gens def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing SCREAMING_SNAKE_CASE_ : str = '''false''' if args.num_workers is None: SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate SCREAMING_SNAKE_CASE_ : Tuple = Accelerator() set_seed(args.seed , device_specific=__a ) # Load model and tokenizer SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings SCREAMING_SNAKE_CASE_ : List[str] = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ), } # Load evaluation dataset and metric SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' ) SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' ) SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a ) # do not confuse args.batch_size, which is actually the num_return_sequences SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a ) SCREAMING_SNAKE_CASE_ : List[Any] = complete_code( __a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , ) if accelerator.is_main_process: SCREAMING_SNAKE_CASE_ : int = [] for task in tqdm(range(__a ) ): SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test'''] SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute( references=__a , predictions=__a , num_workers=args.num_workers ) print(f'Results: {pass_at_k}' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(__a , __a ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
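# A self-contained sketch of what the first `_A` helper above (referenced at
# the call sites as `remove_last_block`) is meant to do: drop everything from
# the last top-level stop sequence onward.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # keep everything before the last stop sequence; last string should be ""
    return "".join(string_list[:-2])


print(repr(remove_last_block("    return x + 1\n\nclass Foo:\n    pass")))
# -> '    return x + 1\n'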
318
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = tempfile.mkdtemp() # fmt: off SCREAMING_SNAKE_CASE_ : Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) SCREAMING_SNAKE_CASE_ : Optional[Any] = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(self.tmpdirname , lowercase_) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any , **lowercase_ : List[Any]): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **lowercase_ : Optional[int]): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_ : int = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : int = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor(do_normalize=lowercase_ 
, padding_value=1.0) SCREAMING_SNAKE_CASE_ : str = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : List[str] = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : str = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : str = image_processor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(images=lowercase_ , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''lower newer''' SCREAMING_SNAKE_CASE_ : Tuple = processor(text=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(lowercase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.get_image_processor() SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : int = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = '''lower newer''' SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values''']) # test if it raises when no input is passed with self.assertRaises(lowercase_): processor() def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : List[str] = processor.batch_decode(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.batch_decode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = '''lower newer''' SCREAMING_SNAKE_CASE_ : Any = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : List[Any] 
= processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
356
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["image_processor", "feature_extractor"] __UpperCamelCase = "TvltImageProcessor" __UpperCamelCase = "TvltFeatureExtractor" def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' super().__init__(image_processor=lowercase_ , feature_extractor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_extractor def __call__( self : Any , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : str=None , lowercase_ : int=False , lowercase_ : Union[str, Any]=False , *lowercase_ : List[Any] , **lowercase_ : List[str] , ): '''simple docstring''' if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''') SCREAMING_SNAKE_CASE_ : Any = None if images is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_) if images_mixed is not None: SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_) if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor( lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {} if audio is not None: output_dict.update(lowercase_) if images is not None: output_dict.update(lowercase_) if images_mixed_dict is not None: output_dict.update(lowercase_) return output_dict @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
318
0
"""simple docstring""" def _A (__a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [] SCREAMING_SNAKE_CASE_ : Optional[int] = [] SCREAMING_SNAKE_CASE_ : List[str] = { '''^''': 3, '''*''': 2, '''/''': 2, '''%''': 2, '''+''': 1, '''-''': 1, } # Priority of each operator SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(__A ) if (len(__A ) > 7) else 7 # Print table header for output print( '''Symbol'''.center(8 ) , '''Stack'''.center(__A ) , '''Postfix'''.center(__A ) , sep=''' | ''' , ) print('''-''' * (print_width * 3 + 7) ) for x in infix: if x.isalpha() or x.isdigit(): post_fix.append(__A ) # if x is Alphabet / Digit, add it to Postfix elif x == "(": stack.append(__A ) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered while stack[-1] != "(": post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix stack.pop() else: if len(__A ) == 0: stack.append(__A ) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack while len(__A ) > 0 and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop() ) # pop stack & add to Postfix stack.append(__A ) # push x to stack print( x.center(8 ) , (''''''.join(__A )).ljust(__A ) , (''''''.join(__A )).ljust(__A ) , sep=''' | ''' , ) # Output in tabular format while len(__A ) > 0: # while stack is not empty post_fix.append(stack.pop() ) # pop stack & add to Postfix print( ''' '''.center(8 ) , (''''''.join(__A )).ljust(__A ) , (''''''.join(__A )).ljust(__A ) , sep=''' | ''' , ) # Output in tabular format return "".join(__A ) # return Postfix as str def _A (__a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = list(infix[::-1] ) # reverse the infix equation for i in range(len(__A ) ): if infix[i] == "(": SCREAMING_SNAKE_CASE_ : List[Any] = ''')''' # change "(" to ")" elif infix[i] == ")": SCREAMING_SNAKE_CASE_ : Any = '''(''' # change ")" to "(" return (infix_2_postfix(''''''.join(__A ) ))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix if __name__ == "__main__": UpperCAmelCase_ : int = input("""\nEnter an Infix Equation = """) # Input an Infix equation UpperCAmelCase_ : str = "".join(Infix.split()) # Remove spaces from the input print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
357
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "SpeechT5FeatureExtractor" __UpperCamelCase = "SpeechT5Tokenizer" def __init__( self : Any , lowercase_ : Dict , lowercase_ : Optional[Any]): '''simple docstring''' super().__init__(lowercase_ , lowercase_) def __call__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('''text''' , lowercase_) SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''text_target''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''audio_target''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''sampling_rate''' , lowercase_) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''') if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''') if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''') if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) elif text is not None: SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(lowercase_ , **lowercase_) else: SCREAMING_SNAKE_CASE_ : Any = None if audio_target is not None: SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = targets['''input_values'''] elif text_target is not None: SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = targets['''input_ids'''] else: SCREAMING_SNAKE_CASE_ : int = None if inputs is None: return targets if targets is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels SCREAMING_SNAKE_CASE_ : Optional[Any] = targets.get('''attention_mask''') if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE_ : Any = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''input_values''' , lowercase_) SCREAMING_SNAKE_CASE_ : int = kwargs.pop('''input_ids''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''labels''' , lowercase_) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''') if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''') if input_values is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_) elif input_ids is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.pad(lowercase_ , **lowercase_) else: SCREAMING_SNAKE_CASE_ : List[Any] = None if labels is not None: if "input_ids" in labels or (isinstance(lowercase_ , lowercase_) and "input_ids" in labels[0]): SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer.pad(lowercase_ , **lowercase_) 
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_ids'''] else: SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.feature_size SCREAMING_SNAKE_CASE_ : Optional[int] = self.feature_extractor.num_mel_bins SCREAMING_SNAKE_CASE_ : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : str = feature_size_hack SCREAMING_SNAKE_CASE_ : Dict = targets['''input_values'''] else: SCREAMING_SNAKE_CASE_ : List[Any] = None if inputs is None: return targets if targets is not None: SCREAMING_SNAKE_CASE_ : Dict = labels SCREAMING_SNAKE_CASE_ : List[str] = targets.get('''attention_mask''') if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Tuple): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowercase_ : Dict , **lowercase_ : List[Any]): '''simple docstring''' return self.tokenizer.decode(*lowercase_ , **lowercase_)
318
0
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping UpperCAmelCase_ : Union[str, Any] = tuple[int, int] class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Dict , lowercase_ : set[int] , lowercase_ : Mapping[EdgeT, int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : set[int] = vertices SCREAMING_SNAKE_CASE_ : dict[EdgeT, int] = { (min(_UpperCamelCase), max(_UpperCamelCase)): weight for edge, weight in edges.items() } def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : EdgeT , lowercase_ : int): '''simple docstring''' self.vertices.add(edge[0]) self.vertices.add(edge[1]) SCREAMING_SNAKE_CASE_ : Optional[Any] = weight def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Graph = Graph({min(self.vertices)} , {}) SCREAMING_SNAKE_CASE_ : EdgeT SCREAMING_SNAKE_CASE_ : int SCREAMING_SNAKE_CASE_ : EdgeT SCREAMING_SNAKE_CASE_ : int while len(subgraph.vertices) < len(self.vertices): SCREAMING_SNAKE_CASE_ : Tuple = max(self.edges.values()) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: SCREAMING_SNAKE_CASE_ : Optional[Any] = edge SCREAMING_SNAKE_CASE_ : str = weight subgraph.add_edge(_UpperCamelCase , _UpperCamelCase) return subgraph def _A (__a = "p107_network.txt" ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = os.path.abspath(os.path.dirname(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE_ : str = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : dict[EdgeT, int] = {} SCREAMING_SNAKE_CASE_ : list[str] SCREAMING_SNAKE_CASE_ : int SCREAMING_SNAKE_CASE_ : int with open(lowerCamelCase_ ) as f: SCREAMING_SNAKE_CASE_ : str = f.read().strip().split('''\n''' ) SCREAMING_SNAKE_CASE_ : Tuple = [line.split(''',''' ) for line in data] for edgea in range(1 , len(lowerCamelCase_ ) ): for edgea in range(lowerCamelCase_ ): if adjaceny_matrix[edgea][edgea] != "-": SCREAMING_SNAKE_CASE_ : str = int(adjaceny_matrix[edgea][edgea] ) SCREAMING_SNAKE_CASE_ : Graph = Graph(set(range(len(lowerCamelCase_ ) ) ) , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Graph = graph.prims_algorithm() SCREAMING_SNAKE_CASE_ : int = sum(graph.edges.values() ) SCREAMING_SNAKE_CASE_ : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
358
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _A (__a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = np.inf def set_batch_size(__a ) -> None: nonlocal batch_size if isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__a , __a ) and feature.dtype == "binary": SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__a , __a ) return None if batch_size is np.inf else batch_size class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Any , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Any = path_or_paths if isinstance(lowercase_ , lowercase_) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE_ : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = Parquet( cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE_ : Optional[Any] = None SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset( split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory) return dataset class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = dataset SCREAMING_SNAKE_CASE_ : Dict = path_or_buf SCREAMING_SNAKE_CASE_ : List[Any] = batch_size or get_writer_batch_size(dataset.features) SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , '''wb+''') as buffer: SCREAMING_SNAKE_CASE_ : 
Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs) else: SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs) return written def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_) for offset in logging.tqdm( range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): SCREAMING_SNAKE_CASE_ : List[Any] = query_table( table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowercase_) written += batch.nbytes writer.close() return written
318
0
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping UpperCAmelCase_ : Dict = tuple[int, int] class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : Tuple , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = vertices SCREAMING_SNAKE_CASE_ : List[str] = { (min(__lowercase), max(__lowercase)): weight for edge, weight in edges.items() } def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : List[Any]): '''simple docstring''' self.vertices.add(edge[0]) self.vertices.add(edge[1]) SCREAMING_SNAKE_CASE_ : List[str] = weight def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = Graph({min(self.vertices)} , {}) SCREAMING_SNAKE_CASE_ : int = 42 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 42 SCREAMING_SNAKE_CASE_ : List[str] = 42 SCREAMING_SNAKE_CASE_ : Any = 42 while len(subgraph.vertices) < len(self.vertices): SCREAMING_SNAKE_CASE_ : Optional[Any] = max(self.edges.values()) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: SCREAMING_SNAKE_CASE_ : Tuple = edge SCREAMING_SNAKE_CASE_ : Optional[Any] = weight subgraph.add_edge(__lowercase , __lowercase) return subgraph def _A (__a = "p107_network.txt" ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = os.path.abspath(os.path.dirname(__a ) ) SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(__a , __a ) SCREAMING_SNAKE_CASE_ : Optional[int] = {} SCREAMING_SNAKE_CASE_ : Dict = 42 SCREAMING_SNAKE_CASE_ : int = 42 SCREAMING_SNAKE_CASE_ : Optional[int] = 42 with open(__a ) as f: SCREAMING_SNAKE_CASE_ : Dict = f.read().strip().split('''\n''' ) SCREAMING_SNAKE_CASE_ : int = [line.split(''',''' ) for line in data] for edgea in range(1 , len(__a ) ): for edgea in range(__a ): if adjaceny_matrix[edgea][edgea] != "-": SCREAMING_SNAKE_CASE_ : Tuple = int(adjaceny_matrix[edgea][edgea] ) SCREAMING_SNAKE_CASE_ : str = Graph(set(range(len(__a ) ) ) , __a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = graph.prims_algorithm() SCREAMING_SNAKE_CASE_ : List[str] = sum(graph.edges.values() ) SCREAMING_SNAKE_CASE_ : Dict = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
359
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""] def _A (__a ) -> Dict: """simple docstring""" if "emb" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def _A (__a , __a ) -> Tuple[Dict, Dict]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() ) SCREAMING_SNAKE_CASE_ : int = {} for key in keys: SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a ) SCREAMING_SNAKE_CASE_ : int = rename_keys(__a ) if "in_proj_weight" in key: # split fused qkv proj SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :] SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :] SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: SCREAMING_SNAKE_CASE_ : int = val else: SCREAMING_SNAKE_CASE_ : Any = val return state_dict, enc_dec_proj_state_dict def _A (__a ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24 SCREAMING_SNAKE_CASE_ : Tuple = 24 SCREAMING_SNAKE_CASE_ : Optional[Any] = 16 elif checkpoint == "medium": SCREAMING_SNAKE_CASE_ : List[str] = 15_36 SCREAMING_SNAKE_CASE_ : Optional[int] = 48 SCREAMING_SNAKE_CASE_ : Optional[int] = 24 elif checkpoint == "large": SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48 SCREAMING_SNAKE_CASE_ : Optional[int] = 48 SCREAMING_SNAKE_CASE_ : int = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig( hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , ) return config @torch.no_grad() def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a ) SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict( __a , hidden_size=decoder_config.hidden_size ) SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel.from_pretrained('''t5-base''' ) SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__a ) if len(__a ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(__a ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__a ) # check we can do a forward pass SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits if logits.shape != (8, 1, 20_48): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' ) SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a ) # set the appropriate bos/pad token ids SCREAMING_SNAKE_CASE_ : str = 20_48 SCREAMING_SNAKE_CASE_ : List[Any] = 20_48 # set other default generation config params SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate ) SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0 if pytorch_dump_folder is not None: Path(__a ).mkdir(exist_ok=__a ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(__a ) processor.save_pretrained(__a ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(__a ) processor.push_to_hub(__a ) if __name__ == "__main__": UpperCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) UpperCAmelCase_ : Dict = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
318
0
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
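# A minimal sketch of the same gradient-descent fit on an assumed 1-D toy set
# (logistic_reg prints its loss every 100 iterations as a side effect).
toy_x = np.array([[-2.0], [-1.0], [1.0], [2.0]])
toy_y = np.array([0, 0, 1, 1])
toy_theta = logistic_reg(0.1, toy_x, toy_y, max_iterations=1000)
print(sigmoid_function(np.dot(np.array([[-2.0], [2.0]]), toy_theta)))  # low for class 0, high for class 1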
360
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def _A (__a ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b def _A (__a ) -> np.ndarray: """simple docstring""" return (gray > 1_27) & (gray <= 2_55) def _A (__a , __a ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE_ : Union[str, Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE_ : Optional[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 ) return output if __name__ == "__main__": # read original image UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path)) # kernel to be applied UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
318
0
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """spiece.model"""} UpperCAmelCase_ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } UpperCAmelCase_ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } UpperCAmelCase_ = """▁""" class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[Any] , lowercase_ : Any , lowercase_ : Optional[Any]=True , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=False , lowercase_ : Union[str, Any]="[CLS]" , lowercase_ : List[str]="[SEP]" , lowercase_ : List[str]="<unk>" , lowercase_ : Union[str, Any]="[SEP]" , lowercase_ : List[Any]="<pad>" , lowercase_ : List[str]="[CLS]" , lowercase_ : int="[MASK]" , lowercase_ : List[Any] = None , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a) if isinstance(_a , _a) else mask_token ) SCREAMING_SNAKE_CASE_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_lower_case SCREAMING_SNAKE_CASE_ : List[str] = remove_space SCREAMING_SNAKE_CASE_ : Dict = keep_accents SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file SCREAMING_SNAKE_CASE_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_a) @property def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' return len(self.sp_model) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = {self.convert_ids_to_tokens(_a): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : Union[str, Any] = None return state def __setstate__( self : Optional[int] , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = d # for backward 
compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Tuple = {} SCREAMING_SNAKE_CASE_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Optional[int]): '''simple docstring''' if self.remove_space: SCREAMING_SNAKE_CASE_ : List[Any] = ''' '''.join(inputs.strip().split()) else: SCREAMING_SNAKE_CASE_ : Any = inputs SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''') if not self.keep_accents: SCREAMING_SNAKE_CASE_ : Dict = unicodedata.normalize('''NFKD''' , _a) SCREAMING_SNAKE_CASE_ : Dict = ''''''.join([c for c in outputs if not unicodedata.combining(_a)]) if self.do_lower_case: SCREAMING_SNAKE_CASE_ : int = outputs.lower() return outputs def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.preprocess_text(_a) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sp_model.encode(_a , out_type=_a) SCREAMING_SNAKE_CASE_ : Dict = [] for piece in pieces: if len(_a) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit(): SCREAMING_SNAKE_CASE_ : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , '''''')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: SCREAMING_SNAKE_CASE_ : Dict = cur_pieces[1:] else: SCREAMING_SNAKE_CASE_ : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(_a) else: new_pieces.append(_a) return new_pieces def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str): '''simple docstring''' return self.sp_model.PieceToId(_a) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Tuple): '''simple docstring''' return self.sp_model.IdToPiece(_a) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [] SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a) + token SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : str = [] else: current_sub_tokens.append(_a) SCREAMING_SNAKE_CASE_ : Union[str, Any] = False out_string += self.sp_model.decode(_a) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Any , lowercase_ : Tuple = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Dict = None , lowercase_ : Any = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a) if token_ids_a is not None: return [1] + ([0] * len(_a)) + [1] + ([0] * len(_a)) + [1] return [1] + ([0] * len(_a)) + [1] def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : str , lowercase_ : List[str] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + 
sep) * [0] + len(token_ids_a + sep) * [1] def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : Any = None): '''simple docstring''' if not os.path.isdir(_a): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join( _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(_a) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _a) elif not os.path.isfile(self.vocab_file): with open(_a , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : int = self.sp_model.serialized_model_proto() fi.write(_a) return (out_vocab_file,)
361
"""simple docstring""" from collections import defaultdict def _A (__a , __a ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip() SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip() # Remove whitespace SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(__a ) != len(__a ): return False # Default values for count should be 0 SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a ) # For each character in input strings, # increment count in the corresponding for i in range(len(__a ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ : Any = input("""Enter the first string """).strip() UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip() UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
318
0
"""simple docstring""" import math def _A (__a , __a ) -> Optional[Any]: """simple docstring""" return math.pow(lowercase__ , 2 ) - a def _A (__a ) -> Tuple: """simple docstring""" return 2 * x def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = 2.0 while start <= a: SCREAMING_SNAKE_CASE_ : List[Any] = math.pow(lowercase__ , 2 ) return start def _A (__a , __a = 99_99 , __a = 0.00_00_00_00_00_00_01 ) -> Tuple: """simple docstring""" if a < 0: raise ValueError('''math domain error''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = get_initial_point(lowercase__ ) for _ in range(lowercase__ ): SCREAMING_SNAKE_CASE_ : Dict = value SCREAMING_SNAKE_CASE_ : Optional[Any] = value - fx(lowercase__ , lowercase__ ) / fx_derivative(lowercase__ ) if abs(prev_value - value ) < tolerance: return value return value if __name__ == "__main__": from doctest import testmod testmod()
362
"""simple docstring""" import argparse from collections import defaultdict import yaml UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml""" def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = defaultdict(__a ) for doc in model_doc: counts[doc["local"]] += 1 SCREAMING_SNAKE_CASE_ : List[Any] = [key for key, value in counts.items() if value > 1] SCREAMING_SNAKE_CASE_ : int = [] for duplicate_key in duplicates: SCREAMING_SNAKE_CASE_ : List[str] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(__a ) > 1: raise ValueError( f'{duplicate_key} is present several times in the documentation table of content at ' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(__a , key=lambda __a : s["title"].lower() ) def _A (__a=False ) -> Tuple: """simple docstring""" with open(__a , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = yaml.safe_load(f.read() ) # Get to the API doc SCREAMING_SNAKE_CASE_ : Any = 0 while content[api_idx]["title"] != "API": api_idx += 1 SCREAMING_SNAKE_CASE_ : str = content[api_idx]['''sections'''] # Then to the model doc SCREAMING_SNAKE_CASE_ : List[Any] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 SCREAMING_SNAKE_CASE_ : Optional[int] = api_doc[model_idx]['''sections'''] SCREAMING_SNAKE_CASE_ : str = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section] SCREAMING_SNAKE_CASE_ : Optional[Any] = False for idx, modality_doc in modalities_docs: SCREAMING_SNAKE_CASE_ : List[str] = modality_doc['''sections'''] SCREAMING_SNAKE_CASE_ : Union[str, Any] = clean_model_doc_toc(__a ) if old_modality_doc != new_modality_doc: SCREAMING_SNAKE_CASE_ : str = True if overwrite: SCREAMING_SNAKE_CASE_ : Optional[int] = new_modality_doc if diff: if overwrite: SCREAMING_SNAKE_CASE_ : List[Any] = model_doc SCREAMING_SNAKE_CASE_ : int = api_doc with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Tuple = parser.parse_args() check_model_doc(args.fix_and_overwrite)
318
0
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel UpperCAmelCase_ : Union[str, Any] = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } UpperCAmelCase_ : Any = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def _A (__a , __a=False ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , _lowerCamelCase , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=_lowerCamelCase , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = {} SCREAMING_SNAKE_CASE_ : Any = R""".*sequential.(\d+).*""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: SCREAMING_SNAKE_CASE_ : List[str] = key.replace(_lowerCamelCase , _lowerCamelCase ) if re.match(_lowerCamelCase , _lowerCamelCase ): # replace sequential layers with list SCREAMING_SNAKE_CASE_ : Optional[int] = re.match(_lowerCamelCase , _lowerCamelCase ).group(1 ) SCREAMING_SNAKE_CASE_ : List[str] = key.replace(f'sequential.{sequential_layer}.' , f'layers.{int(_lowerCamelCase )//3}.linear.' ) elif re.match(_lowerCamelCase , _lowerCamelCase ): SCREAMING_SNAKE_CASE_ : Dict = int(re.match(_lowerCamelCase , _lowerCamelCase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... SCREAMING_SNAKE_CASE_ : Tuple = 1 if projecton_layer == 0 else 2 SCREAMING_SNAKE_CASE_ : List[Any] = key.replace(f'_projection.{projecton_layer}.' , f'_projection.linear{transformers_projection_layer}.' 
) if "audio" and "qkv" in key: # split qkv into query key and value SCREAMING_SNAKE_CASE_ : Any = value SCREAMING_SNAKE_CASE_ : Tuple = mixed_qkv.size(0 ) // 3 SCREAMING_SNAKE_CASE_ : Dict = mixed_qkv[:qkv_dim] SCREAMING_SNAKE_CASE_ : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] SCREAMING_SNAKE_CASE_ : Union[str, Any] = mixed_qkv[qkv_dim * 2 :] SCREAMING_SNAKE_CASE_ : Union[str, Any] = query_layer SCREAMING_SNAKE_CASE_ : Dict = key_layer SCREAMING_SNAKE_CASE_ : Optional[int] = value_layer else: SCREAMING_SNAKE_CASE_ : Dict = value return model_state_dict def _A (__a , __a , __a , __a=False ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = init_clap(_lowerCamelCase , enable_fusion=_lowerCamelCase ) clap_model.eval() SCREAMING_SNAKE_CASE_ : int = clap_model.state_dict() SCREAMING_SNAKE_CASE_ : Dict = rename_state_dict(_lowerCamelCase ) SCREAMING_SNAKE_CASE_ : Any = ClapConfig() SCREAMING_SNAKE_CASE_ : Any = enable_fusion SCREAMING_SNAKE_CASE_ : Optional[Any] = ClapModel(_lowerCamelCase ) # ignore the spectrogram embedding layer model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) transformers_config.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase_ : Tuple = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") UpperCAmelCase_ : List[Any] = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
363
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
318
0
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : int = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = {"""vocab_file""": """spiece.model"""} UpperCAmelCase_ : Optional[Any] = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } UpperCAmelCase_ : Dict = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } UpperCAmelCase_ : Tuple = """▁""" class lowerCAmelCase__ ( a_ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=True , lowercase_ : int=False , lowercase_ : Optional[int]="[CLS]" , lowercase_ : List[str]="[SEP]" , lowercase_ : Dict="<unk>" , lowercase_ : Optional[Any]="[SEP]" , lowercase_ : Dict="<pad>" , lowercase_ : str="[CLS]" , lowercase_ : Optional[Any]="[MASK]" , lowercase_ : str = None , **lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = ( AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ , normalized=lowercase_) if isinstance(lowercase_ , lowercase_) else mask_token ) SCREAMING_SNAKE_CASE_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Optional[int] = do_lower_case SCREAMING_SNAKE_CASE_ : Dict = remove_space SCREAMING_SNAKE_CASE_ : List[str] = keep_accents SCREAMING_SNAKE_CASE_ : Dict = vocab_file SCREAMING_SNAKE_CASE_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(lowercase_) @property def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return len(self.sp_model) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : Dict = None return state def __setstate__( self : Any , 
lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Tuple = {} SCREAMING_SNAKE_CASE_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]): '''simple docstring''' if self.remove_space: SCREAMING_SNAKE_CASE_ : Any = ''' '''.join(inputs.strip().split()) else: SCREAMING_SNAKE_CASE_ : List[Any] = inputs SCREAMING_SNAKE_CASE_ : str = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''') if not self.keep_accents: SCREAMING_SNAKE_CASE_ : Union[str, Any] = unicodedata.normalize('''NFKD''' , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''''''.join([c for c in outputs if not unicodedata.combining(lowercase_)]) if self.do_lower_case: SCREAMING_SNAKE_CASE_ : Dict = outputs.lower() return outputs def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.preprocess_text(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.sp_model.encode(lowercase_ , out_type=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = [] for piece in pieces: if len(lowercase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit(): SCREAMING_SNAKE_CASE_ : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase_ , '''''')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: SCREAMING_SNAKE_CASE_ : List[Any] = cur_pieces[1:] else: SCREAMING_SNAKE_CASE_ : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(lowercase_) else: new_pieces.append(lowercase_) return new_pieces def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Dict): '''simple docstring''' return self.sp_model.PieceToId(lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[str]): '''simple docstring''' return self.sp_model.IdToPiece(lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [] SCREAMING_SNAKE_CASE_ : int = '''''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowercase_) + token SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : str = [] else: current_sub_tokens.append(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = False out_string += self.sp_model.decode(lowercase_) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : int , lowercase_ : Optional[Any] = None , lowercase_ : List[Any] = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_) if token_ids_a is not None: return [1] + ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1] return [1] + ([0] * len(lowercase_)) + [1] def _SCREAMING_SNAKE_CASE 
( self : List[str] , lowercase_ : Any , lowercase_ : str = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[str] , lowercase_ : Optional[Any] = None): '''simple docstring''' if not os.path.isdir(lowercase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : Tuple = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowercase_) elif not os.path.isfile(self.vocab_file): with open(lowercase_ , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : Dict = self.sp_model.serialized_model_proto() fi.write(lowercase_) return (out_vocab_file,)
364
"""simple docstring""" from __future__ import annotations UpperCAmelCase_ : List[str] = list[list[int]] # assigning initial values to the grid UpperCAmelCase_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution UpperCAmelCase_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _A (__a , __a , __a , __a ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _A (__a ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _A (__a ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__a ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(__a , __a , __a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = digit if sudoku(__a ) is not None: return grid SCREAMING_SNAKE_CASE_ : Any = 0 return None def _A (__a ) -> None: """simple docstring""" for row in grid: for cell in row: print(__a , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") UpperCAmelCase_ : str = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
318
0
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase_ : List[str] = """platform""" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _A (__a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , ) -> Optional[Any]: """simple docstring""" if attention_mask is None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE_ : str = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: SCREAMING_SNAKE_CASE_ : Optional[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE_ : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE_ : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : Optional[int] , lowercase_ : List[Any]=13 , lowercase_ : Optional[int]=7 , lowercase_ : Dict=True , lowercase_ : Tuple=False , lowercase_ : List[Any]=99 , lowercase_ : Dict=16 , lowercase_ : Any=2 , lowercase_ : List[Any]=4 , lowercase_ : Optional[int]=4 , lowercase_ : Dict="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=32 , lowercase_ : Tuple=2 , lowercase_ : List[Any]=1 , lowercase_ : str=0 , lowercase_ : Any=0.02 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = parent SCREAMING_SNAKE_CASE_ : List[Any] = batch_size SCREAMING_SNAKE_CASE_ : List[str] = seq_length SCREAMING_SNAKE_CASE_ : Any = is_training SCREAMING_SNAKE_CASE_ : Tuple = use_labels SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE_ : Dict = hidden_act SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : List[Any] = eos_token_id SCREAMING_SNAKE_CASE_ : Optional[Any] = pad_token_id SCREAMING_SNAKE_CASE_ : Any = bos_token_id SCREAMING_SNAKE_CASE_ : Any = initializer_range def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size) 
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1) SCREAMING_SNAKE_CASE_ : Tuple = shift_tokens_right(_a , 1 , 2) SCREAMING_SNAKE_CASE_ : Optional[int] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_a , ) SCREAMING_SNAKE_CASE_ : Tuple = prepare_blenderbot_inputs_dict(_a , _a , _a) return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs() return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = 20 SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class_name(_a) SCREAMING_SNAKE_CASE_ : Optional[Any] = model.encode(inputs_dict['''input_ids''']) SCREAMING_SNAKE_CASE_ : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) SCREAMING_SNAKE_CASE_ : Any = model.init_cache(decoder_input_ids.shape[0] , _a , _a) SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''') SCREAMING_SNAKE_CASE_ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , ) SCREAMING_SNAKE_CASE_ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''') SCREAMING_SNAKE_CASE_ : Any = model.decode( decoder_input_ids[:, -1:] , _a , decoder_attention_mask=_a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_a , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.decode(_a , _a) SCREAMING_SNAKE_CASE_ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}') def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 20 SCREAMING_SNAKE_CASE_ : str = model_class_name(_a) SCREAMING_SNAKE_CASE_ : Optional[int] = model.encode(inputs_dict['''input_ids''']) SCREAMING_SNAKE_CASE_ : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) SCREAMING_SNAKE_CASE_ : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) SCREAMING_SNAKE_CASE_ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _a , _a) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) 
SCREAMING_SNAKE_CASE_ : List[str] = model.decode( decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , ) SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''') SCREAMING_SNAKE_CASE_ : List[str] = model.decode( decoder_input_ids[:, -1:] , _a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_a , decoder_position_ids=_a , ) SCREAMING_SNAKE_CASE_ : Dict = model.decode(_a , _a , decoder_attention_mask=_a) SCREAMING_SNAKE_CASE_ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}') @require_flax class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase = 9_9 def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) SCREAMING_SNAKE_CASE_ : List[str] = input_ids.shape[0] SCREAMING_SNAKE_CASE_ : Dict = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self._get_config_and_data() SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxBlenderbotForConditionalGeneration(_a) SCREAMING_SNAKE_CASE_ : Dict = lm_model(input_ids=_a) SCREAMING_SNAKE_CASE_ : Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _a) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxBlenderbotForConditionalGeneration(_a) SCREAMING_SNAKE_CASE_ : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa) SCREAMING_SNAKE_CASE_ : Tuple = lm_model(input_ids=_a , decoder_input_ids=_a) SCREAMING_SNAKE_CASE_ : Optional[Any] = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _a) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa) SCREAMING_SNAKE_CASE_ : List[str] = shift_tokens_right(_a , 1 , 2) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.equal(_a , 1).astype(np.floataa).sum() SCREAMING_SNAKE_CASE_ : int = np.equal(_a , 1).astype(np.floataa).sum() self.assertEqual(shifted.shape , input_ids.shape) self.assertEqual(_a , n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0] , 
2).all()) @require_flax class lowerCAmelCase__ ( __lowercase , unittest.TestCase , __lowercase ): '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxBlenderbotModelTester(self) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_a , _a , _a) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_a , _a , _a) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_for_class(_a , _a) SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_a) @jax.jit def encode_jitted(lowercase_ : Tuple , lowercase_ : str=None , **lowercase_ : Union[str, Any]): return model.encode(input_ids=_a , attention_mask=_a) with self.subTest('''JIT Enabled'''): SCREAMING_SNAKE_CASE_ : Optional[Any] = encode_jitted(**_a).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): SCREAMING_SNAKE_CASE_ : Tuple = encode_jitted(**_a).to_tuple() self.assertEqual(len(_a) , len(_a)) for jitted_output, output in zip(_a , _a): self.assertEqual(jitted_output.shape , output.shape) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): SCREAMING_SNAKE_CASE_ : List[str] = model_class(_a) SCREAMING_SNAKE_CASE_ : List[Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask''']) SCREAMING_SNAKE_CASE_ : Union[str, Any] = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]): return model.decode( decoder_input_ids=_a , decoder_attention_mask=_a , encoder_outputs=_a , ) with self.subTest('''JIT Enabled'''): SCREAMING_SNAKE_CASE_ : Optional[int] = decode_jitted(**_a).to_tuple() with self.subTest('''JIT Disabled'''): with jax.disable_jit(): SCREAMING_SNAKE_CASE_ : Dict = decode_jitted(**_a).to_tuple() self.assertEqual(len(_a) , len(_a)) for jitted_output, output in zip(_a , _a): self.assertEqual(jitted_output.shape , output.shape) @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE_ : str = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''') # FlaxBlenderbotForSequenceClassification expects eos token in input_ids SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.ones((1, 1)) * model.config.eos_token_id SCREAMING_SNAKE_CASE_ : str = model(_a) self.assertIsNotNone(_a) 
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''') @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} SCREAMING_SNAKE_CASE_ : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_a) SCREAMING_SNAKE_CASE_ : str = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''') SCREAMING_SNAKE_CASE_ : List[Any] = ['''Sam'''] SCREAMING_SNAKE_CASE_ : str = tokenizer(_a , return_tensors='''jax''') SCREAMING_SNAKE_CASE_ : List[Any] = model.generate(**_a , **_a) SCREAMING_SNAKE_CASE_ : Optional[Any] = '''Sam is a great name. It means "sun" in Gaelic.''' SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(_a , **_a) assert generated_txt[0].strip() == tgt_text
365
"""simple docstring""" from itertools import permutations def _A (__a ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False SCREAMING_SNAKE_CASE_ : List[str] = [7, 11, 13, 17] for i, test in enumerate(__a ): if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def _A (__a = 10 ) -> int: """simple docstring""" return sum( int(''''''.join(map(__a , __a ) ) ) for num in permutations(range(__a ) ) if is_substring_divisible(__a ) ) if __name__ == "__main__": print(f'''{solution() = }''')
318
0
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase = ["input_features"] def __init__( self : Tuple , lowercase_ : Dict=80 , lowercase_ : Optional[int]=16000 , lowercase_ : Tuple=160 , lowercase_ : Union[str, Any]=30 , lowercase_ : Tuple=400 , lowercase_ : str=0.0 , lowercase_ : List[str]=False , **lowercase_ : List[Any] , ): '''simple docstring''' super().__init__( feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) SCREAMING_SNAKE_CASE_ : int = n_fft SCREAMING_SNAKE_CASE_ : int = hop_length SCREAMING_SNAKE_CASE_ : Union[str, Any] = chunk_length SCREAMING_SNAKE_CASE_ : Optional[int] = chunk_length * sampling_rate SCREAMING_SNAKE_CASE_ : Tuple = self.n_samples // hop_length SCREAMING_SNAKE_CASE_ : List[Any] = sampling_rate SCREAMING_SNAKE_CASE_ : Optional[int] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=__UpperCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = spectrogram( __UpperCAmelCase , window_function(self.n_fft , '''hann''') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , ) SCREAMING_SNAKE_CASE_ : Any = log_spec[:, :-1] SCREAMING_SNAKE_CASE_ : Dict = np.maximum(__UpperCAmelCase , log_spec.max() - 8.0) SCREAMING_SNAKE_CASE_ : Union[str, Any] = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] = 0.0): '''simple docstring''' if attention_mask is not None: SCREAMING_SNAKE_CASE_ : List[str] = np.array(__UpperCAmelCase , np.intaa) SCREAMING_SNAKE_CASE_ : int = [] for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1)): SCREAMING_SNAKE_CASE_ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7) if length < normed_slice.shape[0]: SCREAMING_SNAKE_CASE_ : int = padding_value normed_input_values.append(__UpperCAmelCase) else: SCREAMING_SNAKE_CASE_ : List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values] return normed_input_values def __call__( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[Any] = True , lowercase_ : List[str] = None , lowercase_ : Optional[Any] = None , lowercase_ : Dict = None , lowercase_ : Union[str, Any] = "max_length" , lowercase_ : Union[str, Any] = None , lowercase_ : Dict = None , lowercase_ : int = None , **lowercase_ : int , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' F' sampling rate of {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input' F' was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''') SCREAMING_SNAKE_CASE_ : Dict = isinstance(__UpperCAmelCase , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}') SCREAMING_SNAKE_CASE_ : Optional[int] = is_batched_numpy or ( isinstance(__UpperCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: SCREAMING_SNAKE_CASE_ : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech] elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray): SCREAMING_SNAKE_CASE_ : Tuple = np.asarray(__UpperCAmelCase , dtype=np.floataa) elif isinstance(__UpperCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): SCREAMING_SNAKE_CASE_ : Any = raw_speech.astype(np.floataa) # always return batch if not is_batched: SCREAMING_SNAKE_CASE_ : Dict = [np.asarray([raw_speech]).T] SCREAMING_SNAKE_CASE_ : List[Any] = BatchFeature({'''input_features''': raw_speech}) # convert into correct format for padding SCREAMING_SNAKE_CASE_ : Optional[int] = self.pad( __UpperCAmelCase , padding=__UpperCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: SCREAMING_SNAKE_CASE_ : List[Any] = self.zero_mean_unit_var_norm( padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , ) SCREAMING_SNAKE_CASE_ : Any = np.stack(padded_inputs['''input_features'''] , axis=0) # make sure list is in array format SCREAMING_SNAKE_CASE_ : str = padded_inputs.get('''input_features''').transpose(2 , 0 , 1) SCREAMING_SNAKE_CASE_ : Optional[int] = [self._np_extract_fbank_features(__UpperCAmelCase) for waveform in input_features[0]] if isinstance(input_features[0] , __UpperCAmelCase): SCREAMING_SNAKE_CASE_ : Union[str, Any] = [np.asarray(__UpperCAmelCase , dtype=np.floataa) for feature in input_features] else: SCREAMING_SNAKE_CASE_ : Optional[int] = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) SCREAMING_SNAKE_CASE_ : Dict = padded_inputs["""attention_mask"""][:, :: self.hop_length] if return_tensors is not None: SCREAMING_SNAKE_CASE_ : Any = padded_inputs.convert_to_tensors(__UpperCAmelCase) return padded_inputs def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = copy.deepcopy(self.__dict__) SCREAMING_SNAKE_CASE_ : Any = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
366
"""simple docstring""" UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5 def _A (__a , __a , __a = g ) -> float: """simple docstring""" if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
318
0
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase_ : str = logging.getLogger() @unittest.skip("Temporarily disable the doc tests." ) @require_torch @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Path , lowercase_ : Union[str, None] = None , lowercase_ : Union[List[str], None] = None , lowercase_ : Union[str, List[str], None] = None , lowercase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [file for file in os.listdir(lowerCAmelCase__) if os.path.isfile(os.path.join(lowerCAmelCase__ , lowerCAmelCase__))] if identifier is not None: SCREAMING_SNAKE_CASE_ : Dict = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowerCAmelCase__ , lowerCAmelCase__): for n_ in n_identifier: SCREAMING_SNAKE_CASE_ : int = [file for file in files if n_ not in file] else: SCREAMING_SNAKE_CASE_ : str = [file for file in files if n_identifier not in file] SCREAMING_SNAKE_CASE_ : int = ignore_files or [] ignore_files.append('''__init__.py''') SCREAMING_SNAKE_CASE_ : Any = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , lowerCAmelCase__) if only_modules: SCREAMING_SNAKE_CASE_ : Optional[Any] = file.split('''.''')[0] try: SCREAMING_SNAKE_CASE_ : List[str] = getattr(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_ : Dict = doctest.DocTestSuite(lowerCAmelCase__) SCREAMING_SNAKE_CASE_ : Optional[int] = unittest.TextTestRunner().run(lowerCAmelCase__) self.assertIs(len(result.failures) , 0) except AttributeError: logger.info(F'{module_identifier} is not a module.') else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = doctest.testfile(str('''..''' / directory / file) , optionflags=doctest.ELLIPSIS) self.assertIs(result.failed , 0) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = Path('''src/transformers''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = "modeling" SCREAMING_SNAKE_CASE_ : List[Any] = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__ , ignore_files=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = Path('''src/transformers''') SCREAMING_SNAKE_CASE_ : Optional[Any] = "tokenization" self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = Path('''src/transformers''') SCREAMING_SNAKE_CASE_ : int = "configuration" self.analyze_directory(lowerCAmelCase__ , identifier=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = Path('''src/transformers''') SCREAMING_SNAKE_CASE_ : List[str] = ["configuration", "modeling", "tokenization"] self.analyze_directory(lowerCAmelCase__ , n_identifier=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = Path('''docs/source''') SCREAMING_SNAKE_CASE_ : Any = ["favicon.ico"] self.analyze_directory(lowerCAmelCase__ , ignore_files=lowerCAmelCase__ , only_modules=lowerCAmelCase__)
367
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _A (__a ) -> Union[str, Any]: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__a ) def _A (__a ) -> Any: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__a , id=__a )
318
0
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def _A (__a , __a ) -> tuple: """simple docstring""" if inductance <= 0: raise ValueError('''Inductance cannot be 0 or negative''' ) elif capacitance <= 0: raise ValueError('''Capacitance cannot be 0 or negative''' ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
368
"""simple docstring""" import argparse import os import re import packaging.version UpperCAmelCase_ : Any = """examples/""" UpperCAmelCase_ : Optional[int] = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCAmelCase_ : List[Any] = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCAmelCase_ : Optional[int] = """README.md""" def _A (__a , __a , __a ) -> int: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern] SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a ) SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a ) with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(__a ) def _A (__a ) -> int: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' ) def _A (__a , __a=False ) -> List[str]: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a , __a , __a ) if not patch: update_version_in_examples(__a ) def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?''' with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Tuple = f.readlines() # Find the start of the list. SCREAMING_SNAKE_CASE_ : Tuple = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Dict = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(__a ) def _A () -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: SCREAMING_SNAKE_CASE_ : Any = f.read() SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def _A (__a=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version elif patch: SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version print(f'Updating version to {version}.' ) global_version_update(__a , patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _A () -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_version() SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0' SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version # Check with the user we got that right. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version print(f'Updating version to {version}.' ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCAmelCase_ : int = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
318
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } UpperCAmelCase_ : Tuple = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _A (__a , __a , __a , __a , __a ) -> Dict: """simple docstring""" for attribute in key.split('''.''' ): SCREAMING_SNAKE_CASE_ : List[Any] = getattr(a__ , a__ ) if weight_type is not None: SCREAMING_SNAKE_CASE_ : Tuple = getattr(a__ , a__ ).shape else: SCREAMING_SNAKE_CASE_ : List[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": SCREAMING_SNAKE_CASE_ : int = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE_ : List[str] = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE_ : Any = value elif weight_type == "bias": SCREAMING_SNAKE_CASE_ : List[Any] = value elif weight_type == "running_mean": SCREAMING_SNAKE_CASE_ : List[Any] = value elif weight_type == "running_var": SCREAMING_SNAKE_CASE_ : Optional[int] = value elif weight_type == "num_batches_tracked": SCREAMING_SNAKE_CASE_ : int = value elif weight_type == "inv_freq": SCREAMING_SNAKE_CASE_ : List[str] = value else: SCREAMING_SNAKE_CASE_ : Dict = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _A (__a , __a , __a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [] SCREAMING_SNAKE_CASE_ : int = fairseq_model.state_dict() SCREAMING_SNAKE_CASE_ : str = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): SCREAMING_SNAKE_CASE_ : Optional[int] = False if "conv_layers" in name: load_conv_layer( a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = True else: for key, mapped_key in MAPPING.items(): SCREAMING_SNAKE_CASE_ : List[Any] = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: SCREAMING_SNAKE_CASE_ : Optional[int] = True if "*" in mapped_key: SCREAMING_SNAKE_CASE_ : List[str] = name.split(a__ )[0].split('''.''' )[-2] SCREAMING_SNAKE_CASE_ : Union[str, Any] = mapped_key.replace('''*''' , a__ ) if "pos_bias_u" in name: SCREAMING_SNAKE_CASE_ : str = None elif "pos_bias_v" in name: SCREAMING_SNAKE_CASE_ : Dict = None elif "weight_g" in name: SCREAMING_SNAKE_CASE_ : Dict = '''weight_g''' elif "weight_v" in name: SCREAMING_SNAKE_CASE_ : str = '''weight_v''' elif "bias" in name: SCREAMING_SNAKE_CASE_ : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj SCREAMING_SNAKE_CASE_ : List[str] = '''weight''' elif "running_mean" in name: SCREAMING_SNAKE_CASE_ : List[str] = '''running_mean''' elif "inv_freq" in name: SCREAMING_SNAKE_CASE_ : str = '''inv_freq''' elif "running_var" in name: SCREAMING_SNAKE_CASE_ : Any = '''running_var''' elif "num_batches_tracked" in name: SCREAMING_SNAKE_CASE_ : Optional[Any] = '''num_batches_tracked''' else: SCREAMING_SNAKE_CASE_ : Optional[Any] = None set_recursively(a__ , a__ , a__ , a__ , a__ ) continue if not is_used: unused_weights.append(a__ ) logger.warning(f'Unused weights: {unused_weights}' ) def _A (__a , __a , __a , __a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] SCREAMING_SNAKE_CASE_ : str = name.split('''.''' ) SCREAMING_SNAKE_CASE_ : str = int(items[0] ) SCREAMING_SNAKE_CASE_ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ : Dict = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(a__ ) @torch.no_grad() def _A (__a , __a , __a=None , __a=None , __a=True ) -> int: """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaConformerConfig.from_pretrained(a__ , hidden_act='''swish''' ) else: SCREAMING_SNAKE_CASE_ : List[Any] = WavaVecaConformerConfig() if "rope" in checkpoint_path: SCREAMING_SNAKE_CASE_ : List[Any] = '''rotary''' if is_finetuned: if dict_path: SCREAMING_SNAKE_CASE_ : Union[str, Any] = Dictionary.load(a__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq SCREAMING_SNAKE_CASE_ : Tuple = target_dict.pad_index SCREAMING_SNAKE_CASE_ : List[Any] = target_dict.bos_index SCREAMING_SNAKE_CASE_ : Dict = target_dict.eos_index SCREAMING_SNAKE_CASE_ : Dict = len(target_dict.symbols ) SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(a__ , '''vocab.json''' ) if not os.path.isdir(a__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a__ ) ) return os.makedirs(a__ , exist_ok=a__ ) SCREAMING_SNAKE_CASE_ : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched SCREAMING_SNAKE_CASE_ : Tuple = 0 SCREAMING_SNAKE_CASE_ : Any = 1 with open(a__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(a__ , a__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaCTCTokenizer( a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a__ , ) SCREAMING_SNAKE_CASE_ : Any = True if config.feat_extract_norm == '''layer''' else False SCREAMING_SNAKE_CASE_ : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , ) SCREAMING_SNAKE_CASE_ : Dict = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ ) processor.save_pretrained(a__ ) SCREAMING_SNAKE_CASE_ : str = WavaVecaConformerForCTC(a__ ) else: SCREAMING_SNAKE_CASE_ : Any = WavaVecaConformerForPreTraining(a__ ) if is_finetuned: SCREAMING_SNAKE_CASE_ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) 
else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.Namespace(task='''audio_pretraining''' ) SCREAMING_SNAKE_CASE_ : str = fairseq.tasks.setup_task(a__ ) SCREAMING_SNAKE_CASE_ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a__ ) SCREAMING_SNAKE_CASE_ : List[str] = model[0].eval() recursively_load_weights(a__ , a__ , not is_finetuned ) hf_wavavec.save_pretrained(a__ ) if __name__ == "__main__": UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase_ : Optional[int] = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
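The core of the weight-renaming loop above is the wildcard substitution: a fairseq key such as encoder.layers.3.ffn1.w_1 is matched against a MAPPING entry and the layer index is spliced in for the "*". A self-contained sketch (the example key is made up; the mapping entry is copied from the table above):

fairseq_key = "encoder.layers.3.ffn1.w_1"
mapping = {"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense"}
for src, dst in mapping.items():
    if src in fairseq_key:
        # the segment just before the matched suffix is the layer index
        layer_index = fairseq_key.split(src)[0].split(".")[-2]  # -> "3"
        print(dst.replace("*", layer_index))  # encoder.layers.3.ffn1.intermediate_dense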
369
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _A (__a , __a , __a=1e-12 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T return jnp.matmul(__a , norm_emb_a.T ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config) SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype) SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param( '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,)) def __call__( self : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs SCREAMING_SNAKE_CASE_ : Tuple = 0.0 SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_) # Use a lower threshold if an image has any special care concept SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01 SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = CLIPConfig __UpperCamelCase = "clip_input" __UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ): '''simple docstring''' if input_shape is None: SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3) SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_) super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , 
lowercase_ : Tuple , lowercase_ : FrozenDict = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng} SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params'''] return random_params def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1)) return self.module.apply( {'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
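The cosine-distance helper above row-normalizes both embedding matrices and takes a matmul, which yields pairwise cosine similarities. A NumPy restatement of the same math (assumed equivalent; the names are mine):

import numpy as np

def cosine_similarity(emb_1, emb_2, eps=1e-12):
    # divide each row by its L2 norm, clipped away from zero, then dot
    a = emb_1 / np.clip(np.linalg.norm(emb_1, axis=1, keepdims=True), eps, None)
    b = emb_2 / np.clip(np.linalg.norm(emb_2, axis=1, keepdims=True), eps, None)
    return a @ b.T

print(cosine_similarity(np.eye(3), np.eye(3)))  # identity: each row matches only itself

The safety check then thresholds these scores against per-concept weights, with a small extra margin applied when any "special care" concept fires.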
318
0
import functools def _A (days , costs ) -> int: """simple docstring""" if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ): raise ValueError('''The parameter days should be a list of integers''' ) if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ): raise ValueError('''The parameter costs should be a list of three integers''' ) if len(days ) == 0: return 0 if min(days ) <= 0: raise ValueError('''All days elements should be greater than 0''' ) if max(days ) >= 3_66: raise ValueError('''All days elements should be less than 366''' ) days_set = set(days ) @functools.cache def dynamic_programming(index ) -> int: if index > 3_65: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
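A quick check of the memoized planner above, assuming the file has been executed so _A is in scope: travelling on days 1, 4, 6, 7, 8 and 20 with pass prices 2/7/15 is cheapest with a 7-day pass for days 1-7 plus 1-day passes for days 8 and 20.

print(_A([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11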
370
"""simple docstring""" from __future__ import annotations import queue class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = data SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None def _A () -> TreeNode: """simple docstring""" print('''\n********Press N to stop entering at any point of time********\n''' ) SCREAMING_SNAKE_CASE_ : List[Any] = input('''Enter the value of the root node: ''' ).strip().lower() SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE_ : Union[str, Any] = TreeNode(int(__a ) ) q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Optional[int] = q.get() SCREAMING_SNAKE_CASE_ : List[str] = f'Enter the left node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : Optional[int] = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : List[str] = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = left_node q.put(__a ) SCREAMING_SNAKE_CASE_ : str = f'Enter the right node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : str = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : Any = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : int = right_node q.put(__a ) raise def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Tuple = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : str = [] while not q.empty(): SCREAMING_SNAKE_CASE_ : List[str] = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__a ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Union[str, Any] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE_ : Tuple = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE_ : str = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Any = node while n or stack: while n: stack.append(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.left SCREAMING_SNAKE_CASE_ : Any = stack.pop() 
print(n.data , end=''',''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = [], [] SCREAMING_SNAKE_CASE_ : List[Any] = node stacka.append(__a ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE_ : List[str] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__a ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def _A (__a = "" , __a=50 , __a="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 ) return f'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase_ : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
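The iterative post-order above uses the classic two-stack trick. A compact, self-contained restatement (class and function names are mine):

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def post_order_two_stacks(root):
    if root is None:
        return []
    stack, out = [root], []
    while stack:  # visit in root-right-left order, collecting values
        node = stack.pop()
        out.append(node.data)
        stack.extend(child for child in (node.left, node.right) if child)
    return out[::-1]  # reversed gives left-right-root, i.e. post-order

tree = Node(1, Node(2, Node(4), Node(5)), Node(3))
print(post_order_two_stacks(tree))  # [4, 5, 2, 3, 1]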
318
0
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''' , [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 , dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=13_37 , num_examples=42 )} ), SplitDict({'''train''': SplitInfo()} ), ] , ) def _A (__a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = split_dict._to_yaml_list() assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = SplitDict._from_yaml_list(UpperCAmelCase__ ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump SCREAMING_SNAKE_CASE_ : List[Any] = None # the split name of split_dict takes over the name of the split info object SCREAMING_SNAKE_CASE_ : int = split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=UpperCAmelCase__ ), SplitInfo(dataset_name='''my_dataset''' )] ) def _A (__a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
371
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx" def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Tuple = np.array( [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 
0.76_94_25_15]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array( [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions() SCREAMING_SNAKE_CASE_ : Optional[int] = False return options def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''') SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : int = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : List[str] = np.array( [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
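All of these assertions follow one pattern: take a 3x3 corner slice of the last channel and compare it to stored reference values within a loose tolerance. A NumPy restatement with placeholder data:

import numpy as np

image = np.zeros((1, 512, 512, 3))     # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]   # bottom-right 3x3 patch, last channel
expected_slice = np.zeros(9)           # stand-in for the stored reference values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1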
318
0
"""simple docstring""" def _A (__a ) -> int: """simple docstring""" if edge <= 0 or not isinstance(__a , __a ): raise ValueError('''Length must be a positive.''' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def _A (__a ) -> Union[str, Any]: """simple docstring""" if edge <= 0 or not isinstance(__a , __a ): raise ValueError('''Length must be a positive.''' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
350
"""simple docstring""" from scipy.stats import pearsonr import datasets UpperCAmelCase_ : List[Any] = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ UpperCAmelCase_ : Optional[int] = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ UpperCAmelCase_ : Tuple = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False): '''simple docstring''' if return_pvalue: SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
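The compute method above is a thin wrapper around scipy; calling scipy directly reproduces the numbers from the docstring examples:

from scipy.stats import pearsonr

statistic, p_value = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(statistic, 2), round(p_value, 2))  # -0.74 0.15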
318
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""", """funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""", """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""", """funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""", } class lowerCAmelCase__ ( lowercase__ ): '''simple docstring''' __UpperCamelCase = """funnel""" __UpperCamelCase = { """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", } def __init__( self : Tuple , lowercase_ : str=30522 , lowercase_ : int=[4, 4, 4] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=2 , lowercase_ : List[str]=768 , lowercase_ : int=12 , lowercase_ : Optional[int]=64 , lowercase_ : Optional[int]=3072 , lowercase_ : List[Any]="gelu_new" , lowercase_ : str=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=None , lowercase_ : Optional[Any]=1e-9 , lowercase_ : Optional[Any]="mean" , lowercase_ : Union[str, Any]="relative_shift" , lowercase_ : Dict=True , lowercase_ : List[str]=True , lowercase_ : List[str]=True , **lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE_ : List[Any] = block_sizes SCREAMING_SNAKE_CASE_ : Optional[Any] = [1] * len(lowercase_) if block_repeats is None else block_repeats assert len(lowercase_) == len( self.block_repeats), "`block_sizes` and `block_repeats` should have the same length." SCREAMING_SNAKE_CASE_ : str = num_decoder_layers SCREAMING_SNAKE_CASE_ : List[Any] = d_model SCREAMING_SNAKE_CASE_ : Any = n_head SCREAMING_SNAKE_CASE_ : List[Any] = d_head SCREAMING_SNAKE_CASE_ : int = d_inner SCREAMING_SNAKE_CASE_ : Dict = hidden_act SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout SCREAMING_SNAKE_CASE_ : Optional[int] = attention_dropout SCREAMING_SNAKE_CASE_ : Any = activation_dropout SCREAMING_SNAKE_CASE_ : int = initializer_range SCREAMING_SNAKE_CASE_ : str = initializer_std SCREAMING_SNAKE_CASE_ : Any = layer_norm_eps assert pooling_type in [ "mean", "max", ], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.' SCREAMING_SNAKE_CASE_ : Any = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.' 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_type SCREAMING_SNAKE_CASE_ : Union[str, Any] = separate_cls SCREAMING_SNAKE_CASE_ : Dict = truncate_seq SCREAMING_SNAKE_CASE_ : Any = pool_q_only super().__init__(**lowercase_) @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return sum(self.block_sizes) @num_hidden_layers.setter def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[int]): '''simple docstring''' raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''') @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' return len(self.block_sizes) @num_blocks.setter def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : int): '''simple docstring''' raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''')
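The two derived properties above are simple aggregates of block_sizes; with the defaults from the signature:

block_sizes = [4, 4, 4]               # default from __init__ above
num_blocks = len(block_sizes)         # 3, as returned by the num_blocks property
num_hidden_layers = sum(block_sizes)  # 12, as returned by the num_hidden_layers property
print(num_blocks, num_hidden_layers)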
351
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCAmelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = pad_token_id SCREAMING_SNAKE_CASE_ : Optional[int] = max_length SCREAMING_SNAKE_CASE_ : Dict = vocab SCREAMING_SNAKE_CASE_ : Dict = merges SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab() return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_) return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]): '''simple docstring''' return cls(**lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs( lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id) return {"attention_mask": attention_mask, "input_ids": input_ids}
318
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = 1 SCREAMING_SNAKE_CASE_ : Tuple = 3 SCREAMING_SNAKE_CASE_ : str = (32, 32) SCREAMING_SNAKE_CASE_ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(a_) return image @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=a_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) return CLIPTextModel(a_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ : str = self.dummy_cond_unet_upscale SCREAMING_SNAKE_CASE_ : Dict = DDPMScheduler() SCREAMING_SNAKE_CASE_ : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : int = self.dummy_vae SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_text_encoder SCREAMING_SNAKE_CASE_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE_ : List[str] = Image.fromarray(np.uinta(a_)).convert('''RGB''').resize((64, 64)) # make sure here that pndm scheduler skips prk SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionUpscalePipeline( unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=350 , ) SCREAMING_SNAKE_CASE_ : Dict = sd_pipe.to(a_) sd_pipe.set_progress_bar_config(disable=a_) 
SCREAMING_SNAKE_CASE_ : Dict = '''A painting of a squirrel eating a burger''' SCREAMING_SNAKE_CASE_ : str = torch.Generator(device=a_).manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe( [prompt] , image=a_ , generator=a_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Tuple = output.images SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=a_).manual_seed(0) SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe( [prompt] , image=a_ , generator=a_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=a_ , )[0] SCREAMING_SNAKE_CASE_ : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_ : str = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) SCREAMING_SNAKE_CASE_ : int = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_cond_unet_upscale SCREAMING_SNAKE_CASE_ : Union[str, Any] = DDPMScheduler() SCREAMING_SNAKE_CASE_ : Optional[int] = DDIMScheduler(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_vae SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_text_encoder SCREAMING_SNAKE_CASE_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') SCREAMING_SNAKE_CASE_ : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE_ : List[str] = Image.fromarray(np.uinta(a_)).convert('''RGB''').resize((64, 64)) # make sure here that pndm scheduler skips prk SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionUpscalePipeline( unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=350 , ) SCREAMING_SNAKE_CASE_ : int = sd_pipe.to(a_) sd_pipe.set_progress_bar_config(disable=a_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A painting of a squirrel eating a burger''' SCREAMING_SNAKE_CASE_ : int = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images assert image.shape[0] == 2 SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Generator(device=a_).manual_seed(0) SCREAMING_SNAKE_CASE_ : Dict = sd_pipe( [prompt] , image=a_ , generator=a_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : List[str] = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''') def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.dummy_cond_unet_upscale SCREAMING_SNAKE_CASE_ : str = DDPMScheduler() SCREAMING_SNAKE_CASE_ : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''') SCREAMING_SNAKE_CASE_ : int = self.dummy_vae SCREAMING_SNAKE_CASE_ : Dict = self.dummy_text_encoder SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') SCREAMING_SNAKE_CASE_ : int = 
self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE_ : List[str] = Image.fromarray(np.uinta(a_)).convert('''RGB''').resize((64, 64)) # put models in fp16, except vae as it overflows in fp16 SCREAMING_SNAKE_CASE_ : int = unet.half() SCREAMING_SNAKE_CASE_ : Optional[Any] = text_encoder.half() # make sure here that pndm scheduler skips prk SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionUpscalePipeline( unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=350 , ) SCREAMING_SNAKE_CASE_ : int = sd_pipe.to(a_) sd_pipe.set_progress_bar_config(disable=a_) SCREAMING_SNAKE_CASE_ : List[Any] = '''A painting of a squirrel eating a burger''' SCREAMING_SNAKE_CASE_ : Any = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe( [prompt] , image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , ).images SCREAMING_SNAKE_CASE_ : str = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''') SCREAMING_SNAKE_CASE_ : Tuple = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale''' '''/upsampled_cat.npy''') SCREAMING_SNAKE_CASE_ : int = '''stabilityai/stable-diffusion-x4-upscaler''' SCREAMING_SNAKE_CASE_ : Tuple = StableDiffusionUpscalePipeline.from_pretrained(a_) pipe.to(a_) pipe.set_progress_bar_config(disable=a_) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ : int = '''a cat sitting on a park bench''' SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Optional[int] = pipe( prompt=a_ , image=a_ , generator=a_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : str = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 1e-3 def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale''' '''/upsampled_cat_fp16.npy''') SCREAMING_SNAKE_CASE_ : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler''' SCREAMING_SNAKE_CASE_ : Any = StableDiffusionUpscalePipeline.from_pretrained( a_ , torch_dtype=torch.floataa , ) pipe.to(a_) pipe.set_progress_bar_config(disable=a_) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE_ : Optional[Any] = '''a cat sitting on a park bench''' SCREAMING_SNAKE_CASE_ : Any = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = pipe( prompt=a_ , image=a_ , generator=a_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE_ : Any = 
load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''') SCREAMING_SNAKE_CASE_ : Optional[Any] = '''stabilityai/stable-diffusion-x4-upscaler''' SCREAMING_SNAKE_CASE_ : str = StableDiffusionUpscalePipeline.from_pretrained( a_ , torch_dtype=torch.floataa , ) pipe.to(a_) pipe.set_progress_bar_config(disable=a_) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE_ : int = '''a cat sitting on a park bench''' SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : int = pipe( prompt=a_ , image=a_ , generator=a_ , num_inference_steps=5 , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
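Every shape assertion in this file derives the expected output size from the conditioning image, since the x4 upscaler quadruples each spatial dimension:

low_res_size = (128, 128)                    # size of the resized input in the slow tests
expected_height_width = low_res_size[0] * 4  # 512
print((1, expected_height_width, expected_height_width, 3))  # expected .images shape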
352
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) SCREAMING_SNAKE_CASE_ : Dict = { '''do_resize''': True, '''size''': {'''height''': 224, '''width''': 224}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], '''do_convert_rgb''': True, } SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str): '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_slow.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_) SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_fast.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , 
tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , lowercase_) self.assertIsInstance(processor_fast.tokenizer , lowercase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , lowercase_) self.assertIsInstance(processor_fast.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''') SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , lowercase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values''']) # test if it raises when no input is passed with pytest.raises(lowercase_): processor() def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
318
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : List[str] = logging.get_logger(__name__) UpperCAmelCase_ : Any = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase_ : Dict = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase_ : str = { """camembert-base""": 512, } UpperCAmelCase_ : Dict = """▁""" class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self : int , lowercase_ : Optional[int] , lowercase_ : int="<s>" , lowercase_ : Optional[Any]="</s>" , lowercase_ : List[Any]="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Optional[int]="<unk>" , lowercase_ : Optional[int]="<pad>" , lowercase_ : str="<mask>" , lowercase_ : List[Any]=["<s>NOTUSED", "</s>NOTUSED"] , lowercase_ : Optional[Any] = None , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__) if isinstance(snake_case__ , snake_case__) else mask_token SCREAMING_SNAKE_CASE_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(snake_case__)) SCREAMING_SNAKE_CASE_ : Any = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> SCREAMING_SNAKE_CASE_ : List[Any] = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} SCREAMING_SNAKE_CASE_ : Tuple = len(self.fairseq_tokens_to_ids) SCREAMING_SNAKE_CASE_ : Dict = len(self.sp_model) + len(self.fairseq_tokens_to_ids) SCREAMING_SNAKE_CASE_ : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[str] = None): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id] SCREAMING_SNAKE_CASE_ : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] = None , lowercase_ : Union[str, Any] = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__) if token_ids_a is None: return [1] + ([0] * len(snake_case__)) + [1] return [1] + ([0] * len(snake_case__)) + [1, 1] + ([0] * len(snake_case__)) + [1] def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[int] , lowercase_ : Optional[Any] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = 
[self.sep_token_id] SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return len(self.fairseq_tokens_to_ids) + len(self.sp_model) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = {self.convert_ids_to_tokens(snake_case__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[Any]): '''simple docstring''' return self.sp_model.encode(snake_case__ , out_type=snake_case__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Tuple): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(snake_case__) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(snake_case__) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Any): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] SCREAMING_SNAKE_CASE_ : List[str] = '' SCREAMING_SNAKE_CASE_ : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case__) + token SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : Optional[int] = [] else: current_sub_tokens.append(snake_case__) SCREAMING_SNAKE_CASE_ : Union[str, Any] = False out_string += self.sp_model.decode(snake_case__) return out_string.strip() def __getstate__( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : Any = None return state def __setstate__( self : Tuple , lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Optional[int] = {} SCREAMING_SNAKE_CASE_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[int] , lowercase_ : str = None): '''simple docstring''' if not os.path.isdir(snake_case__): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : Any = os.path.join( snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(snake_case__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , snake_case__) elif not os.path.isfile(self.vocab_file): with open(snake_case__ , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : Dict = self.sp_model.serialized_model_proto() fi.write(snake_case__) return (out_vocab_file,)
353
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "rwkv" __UpperCamelCase = {"max_position_embeddings": "context_length"} def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE_ : Any = context_length SCREAMING_SNAKE_CASE_ : int = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every SCREAMING_SNAKE_CASE_ : Dict = use_cache SCREAMING_SNAKE_CASE_ : Dict = bos_token_id SCREAMING_SNAKE_CASE_ : Any = eos_token_id super().__init__( tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
318
0
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : List[Any] = BlipImageProcessor() SCREAMING_SNAKE_CASE_ : Any = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''') SCREAMING_SNAKE_CASE_ : Optional[int] = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) processor.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowercase_ : Tuple): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase).tokenizer def _SCREAMING_SNAKE_CASE ( self : int , **lowercase_ : str): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase).image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowercase_ : Any): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase).qformer_tokenizer def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') SCREAMING_SNAKE_CASE_ : int = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0) SCREAMING_SNAKE_CASE_ : Optional[int] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , __UpperCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __UpperCAmelCase) self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : List[str] = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipProcessor( tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase) SCREAMING_SNAKE_CASE_ : Tuple = 
self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Dict = image_processor(__UpperCAmelCase , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : Any = processor(images=__UpperCAmelCase , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.get_image_processor() SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Dict = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : List[Any] = InstructBlipProcessor( tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase) SCREAMING_SNAKE_CASE_ : Any = '''lower newer''' SCREAMING_SNAKE_CASE_ : Optional[int] = processor(text=__UpperCAmelCase) SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase) SCREAMING_SNAKE_CASE_ : Optional[Any] = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key]) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key]) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipProcessor( tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase) SCREAMING_SNAKE_CASE_ : Tuple = '''lower newer''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase) self.assertListEqual( list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase): processor() def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[Any] = InstructBlipProcessor( tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase) SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.batch_decode(__UpperCAmelCase) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : int = self.get_qformer_tokenizer() SCREAMING_SNAKE_CASE_ : int = InstructBlipProcessor( tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase) SCREAMING_SNAKE_CASE_ : Optional[int] = '''lower newer''' SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=__UpperCAmelCase , 
images=__UpperCAmelCase) self.assertListEqual( list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
354
"""simple docstring""" UpperCAmelCase_ : Optional[int] = 8.3_1_4_4_5_9_8 def _A (__a , __a ) -> float: """simple docstring""" if temperature < 0: raise Exception('''Temperature cannot be less than 0 K''' ) if molar_mass <= 0: raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example UpperCAmelCase_ : str = 300 UpperCAmelCase_ : str = 28 UpperCAmelCase_ : Any = rms_speed_of_molecule(temperature, molar_mass) print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
318
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ : str = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[Any] = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
355
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = dataset SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies def __iter__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip()) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = start_length SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) SCREAMING_SNAKE_CASE_ : Tuple = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(lowercase_) def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a ) # last string should be "" return "".join(string_list[:-2] ) def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__a ) ): with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1] SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a ) # each task is generated batch_size times SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes( __a , dim=1 , pad_index=tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy() SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy() for 
task, generated_tokens in zip(__a , __a ): gen_token_dict[task].append(__a ) SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) code_gens[task].append(remove_last_block(__a ) ) return code_gens def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing SCREAMING_SNAKE_CASE_ : str = '''false''' if args.num_workers is None: SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate SCREAMING_SNAKE_CASE_ : Tuple = Accelerator() set_seed(args.seed , device_specific=__a ) # Load model and tokenizer SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings SCREAMING_SNAKE_CASE_ : List[str] = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ), } # Load evaluation dataset and metric SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' ) SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' ) SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a ) # do not confuse args.batch_size, which is actually the num_return_sequences SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a ) SCREAMING_SNAKE_CASE_ : List[Any] = complete_code( __a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , ) if accelerator.is_main_process: SCREAMING_SNAKE_CASE_ : int = [] for task in tqdm(range(__a ) ): SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test'''] SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute( references=__a , predictions=__a , num_workers=args.num_workers ) print(f'Results: {pass_at_k}' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(__a , __a ) # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
318
0
"""simple docstring""" UpperCAmelCase_ : List[Any] = """\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n""" UpperCAmelCase_ : List[str] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase_ : Any = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
356
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["image_processor", "feature_extractor"] __UpperCamelCase = "TvltImageProcessor" __UpperCamelCase = "TvltFeatureExtractor" def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' super().__init__(image_processor=lowercase_ , feature_extractor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_extractor def __call__( self : Any , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : str=None , lowercase_ : int=False , lowercase_ : Union[str, Any]=False , *lowercase_ : List[Any] , **lowercase_ : List[str] , ): '''simple docstring''' if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''') SCREAMING_SNAKE_CASE_ : Any = None if images is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_) if images_mixed is not None: SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_) if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor( lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {} if audio is not None: output_dict.update(lowercase_) if images is not None: output_dict.update(lowercase_) if images_mixed_dict is not None: output_dict.update(lowercase_) return output_dict @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
318
0
"""simple docstring""" import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : int , lowercase_ : Union[str, Any]=13 , lowercase_ : Tuple=7 , lowercase_ : Tuple=True , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : Dict=99 , lowercase_ : Union[str, Any]=64 , lowercase_ : Dict=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Tuple=4 , lowercase_ : Any=37 , lowercase_ : Any="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : str=16 , lowercase_ : Dict=2 , lowercase_ : List[str]=0.02 , lowercase_ : List[str]=3 , lowercase_ : str=4 , lowercase_ : str=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : Tuple = seq_length SCREAMING_SNAKE_CASE_ : Optional[Any] = is_training SCREAMING_SNAKE_CASE_ : Optional[Any] = use_input_mask SCREAMING_SNAKE_CASE_ : Any = use_token_type_ids SCREAMING_SNAKE_CASE_ : int = use_labels SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE_ : Any = hidden_size SCREAMING_SNAKE_CASE_ : int = embedding_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE_ : str = hidden_act SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE_ : str = type_vocab_size SCREAMING_SNAKE_CASE_ : str = type_sequence_label_size SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE_ : str = num_choices SCREAMING_SNAKE_CASE_ : List[Any] = scope def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : List[Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : Dict = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_ : str = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None SCREAMING_SNAKE_CASE_ : Any = None SCREAMING_SNAKE_CASE_ : Optional[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = MegatronBertModel(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_) SCREAMING_SNAKE_CASE_ : Optional[Any] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_) SCREAMING_SNAKE_CASE_ : Dict = model(UpperCamelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = MegatronBertForMaskedLM(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = MegatronBertForCausalLM(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : str , lowercase_ : List[str] , lowercase_ : int , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = MegatronBertForNextSentencePrediction(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def _SCREAMING_SNAKE_CASE ( 
self : Tuple , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = MegatronBertForPreTraining(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , next_sentence_label=UpperCamelCase_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = MegatronBertForQuestionAnswering(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.num_labels SCREAMING_SNAKE_CASE_ : List[str] = MegatronBertForSequenceClassification(UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.num_labels SCREAMING_SNAKE_CASE_ : List[Any] = MegatronBertForTokenClassification(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.num_choices SCREAMING_SNAKE_CASE_ : Optional[int] = MegatronBertForMultipleChoice(config=UpperCamelCase_) model.to(UpperCamelCase_) model.eval() SCREAMING_SNAKE_CASE_ : List[str] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_ : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , 
-1).contiguous() SCREAMING_SNAKE_CASE_ : Tuple = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = True # test_resize_embeddings = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : str=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_) if return_labels: if model_class in get_values(UpperCamelCase_): SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = MegatronBertModelTester(self) SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*UpperCamelCase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*UpperCamelCase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*UpperCamelCase_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*UpperCamelCase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*UpperCamelCase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*UpperCamelCase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*UpperCamelCase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*UpperCamelCase_) def _A (__a ) -> Any: """simple docstring""" return torch.tensor( A__ , dtype=torch.long , device=A__ , ) UpperCAmelCase_ : Any = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip('''Model is not available.''') def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , UpperCamelCase_) SCREAMING_SNAKE_CASE_ : Any = MegatronBertModel.from_pretrained(UpperCamelCase_) model.to(UpperCamelCase_) model.half() SCREAMING_SNAKE_CASE_ : List[Any] = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]]) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(UpperCamelCase_)[0] SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 9, 1024)) self.assertEqual(output.shape , UpperCamelCase_) SCREAMING_SNAKE_CASE_ : Any = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28] for ii in range(3): for jj in range(3): SCREAMING_SNAKE_CASE_ : Any = output[0, ii, jj] SCREAMING_SNAKE_CASE_ : List[Any] = expected[3 * ii + jj] SCREAMING_SNAKE_CASE_ : Tuple = '''ii={} jj={} a={} b={}'''.format(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) self.assertTrue(math.isclose(UpperCamelCase_ , UpperCamelCase_ , rel_tol=UpperCamelCase_ , abs_tol=UpperCamelCase_) , msg=UpperCamelCase_)
357
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "SpeechT5FeatureExtractor" __UpperCamelCase = "SpeechT5Tokenizer" def __init__( self : Any , lowercase_ : Dict , lowercase_ : Optional[Any]): '''simple docstring''' super().__init__(lowercase_ , lowercase_) def __call__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('''text''' , lowercase_) SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''text_target''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''audio_target''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''sampling_rate''' , lowercase_) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''') if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''') if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''') if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) elif text is not None: SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(lowercase_ , **lowercase_) else: SCREAMING_SNAKE_CASE_ : Any = None if audio_target is not None: SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = targets['''input_values'''] elif text_target is not None: SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = targets['''input_ids'''] else: SCREAMING_SNAKE_CASE_ : int = None if inputs is None: return targets if targets is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels SCREAMING_SNAKE_CASE_ : Optional[Any] = targets.get('''attention_mask''') if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE_ : Any = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''input_values''' , lowercase_) SCREAMING_SNAKE_CASE_ : int = kwargs.pop('''input_ids''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''labels''' , lowercase_) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''') if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''') if input_values is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_) elif input_ids is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.pad(lowercase_ , **lowercase_) else: SCREAMING_SNAKE_CASE_ : List[Any] = None if labels is not None: if "input_ids" in labels or (isinstance(lowercase_ , lowercase_) and "input_ids" in labels[0]): SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer.pad(lowercase_ , **lowercase_) 
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_ids'''] else: SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.feature_size SCREAMING_SNAKE_CASE_ : Optional[int] = self.feature_extractor.num_mel_bins SCREAMING_SNAKE_CASE_ : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : str = feature_size_hack SCREAMING_SNAKE_CASE_ : Dict = targets['''input_values'''] else: SCREAMING_SNAKE_CASE_ : List[Any] = None if inputs is None: return targets if targets is not None: SCREAMING_SNAKE_CASE_ : Dict = labels SCREAMING_SNAKE_CASE_ : List[str] = targets.get('''attention_mask''') if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Tuple): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowercase_ : Dict , **lowercase_ : List[Any]): '''simple docstring''' return self.tokenizer.decode(*lowercase_ , **lowercase_)
318
0
"""simple docstring""" import math def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = [True] * n SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : int = False SCREAMING_SNAKE_CASE_ : List[Any] = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = i * 2 while index < n: SCREAMING_SNAKE_CASE_ : List[Any] = False SCREAMING_SNAKE_CASE_ : Dict = index + i SCREAMING_SNAKE_CASE_ : Union[str, Any] = [2] for i in range(3 , __a , 2 ): if is_prime[i]: primes.append(__a ) return primes def _A (__a = 99_99_66_66_33_33 ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = math.floor(math.sqrt(__a ) ) + 1_00 SCREAMING_SNAKE_CASE_ : List[str] = prime_sieve(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = 0 SCREAMING_SNAKE_CASE_ : List[str] = 0 SCREAMING_SNAKE_CASE_ : Any = primes[prime_index] while (last_prime**2) <= limit: SCREAMING_SNAKE_CASE_ : Dict = primes[prime_index + 1] SCREAMING_SNAKE_CASE_ : Tuple = last_prime**2 SCREAMING_SNAKE_CASE_ : List[str] = next_prime**2 # Get numbers divisible by lps(current) SCREAMING_SNAKE_CASE_ : Optional[Any] = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) SCREAMING_SNAKE_CASE_ : Optional[int] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps SCREAMING_SNAKE_CASE_ : Dict = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair SCREAMING_SNAKE_CASE_ : List[Any] = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
358
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _A (__a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = np.inf def set_batch_size(__a ) -> None: nonlocal batch_size if isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__a , __a ) and feature.dtype == "binary": SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__a , __a ) return None if batch_size is np.inf else batch_size class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Any , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Any = path_or_paths if isinstance(lowercase_ , lowercase_) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE_ : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = Parquet( cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE_ : Optional[Any] = None SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset( split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory) return dataset class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = dataset SCREAMING_SNAKE_CASE_ : Dict = path_or_buf SCREAMING_SNAKE_CASE_ : List[Any] = batch_size or get_writer_batch_size(dataset.features) SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , '''wb+''') as buffer: SCREAMING_SNAKE_CASE_ : 
Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs) else: SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs) return written def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_) for offset in logging.tqdm( range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): SCREAMING_SNAKE_CASE_ : List[Any] = query_table( table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowercase_) written += batch.nbytes writer.close() return written
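# Hedged usage sketch of the reader/writer pair above. The class names are
# mangled in this dump; upstream `datasets` calls them ParquetDatasetReader and
# ParquetDatasetWriter (an assumption here), and the file name is illustrative:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [1, 2, 3]})
#   ParquetDatasetWriter(ds, "out.parquet").write()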
318
0
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path UpperCAmelCase_ : Union[str, Any] = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def _A (__a=True ) -> Tuple: """simple docstring""" if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=SCREAMING_SNAKE_CASE__ ) ) class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = None def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Any , lowercase_ : Optional[int]): '''simple docstring''' with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ : Tuple = dataset_module_factory(a_ , cache_dir=a_) SCREAMING_SNAKE_CASE_ : Dict = import_main_class(dataset_module.module_path , dataset=a_) SCREAMING_SNAKE_CASE_ : Any = builder_cls( cache_dir=a_ , config_name=a_ , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE_ : List[str] = '''/'''.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a_).replace(os.sep , '''/'''), config.DATASET_INFO_FILENAME, ]) SCREAMING_SNAKE_CASE_ : Dict = cached_path(a_ , cache_dir=a_) self.assertTrue(os.path.exists(a_)) @pytest.mark.integration def _A (__a ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple''' SCREAMING_SNAKE_CASE_ : str = dataset_module_factory('''wikipedia''' , cache_dir=_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : List[str] = import_main_class(dataset_module.module_path ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = builder_cls( cache_dir=_UpperCamelCase , config_name='''20220301.frr''' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam SCREAMING_SNAKE_CASE_ : str = None builder_instance.download_and_prepare() SCREAMING_SNAKE_CASE_ : str = builder_instance.as_dataset() assert ds @pytest.mark.integration def _A (__a ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = dataset_module_factory('''wikipedia''' , cache_dir=_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : str = 
import_main_class(dataset_module.module_path , dataset=_UpperCamelCase ) SCREAMING_SNAKE_CASE_ : Tuple = builder_cls( cache_dir=_UpperCamelCase , config_name='''20220301.frr''' , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE_ : Tuple = builder_instance.as_streaming_dataset() assert ds assert isinstance(_UpperCamelCase , _UpperCamelCase ) assert "train" in ds assert isinstance(ds['''train'''] , _UpperCamelCase ) assert next(iter(ds['''train'''] ) )
359
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""] def _A (__a ) -> Dict: """simple docstring""" if "emb" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def _A (__a , __a ) -> Tuple[Dict, Dict]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() ) SCREAMING_SNAKE_CASE_ : int = {} for key in keys: SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a ) SCREAMING_SNAKE_CASE_ : int = rename_keys(__a ) if "in_proj_weight" in key: # split fused qkv proj SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :] SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :] SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: SCREAMING_SNAKE_CASE_ : int = val else: SCREAMING_SNAKE_CASE_ : Any = val return state_dict, enc_dec_proj_state_dict def _A (__a ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24 SCREAMING_SNAKE_CASE_ : Tuple = 24 SCREAMING_SNAKE_CASE_ : Optional[Any] = 16 elif checkpoint == "medium": SCREAMING_SNAKE_CASE_ : List[str] = 15_36 SCREAMING_SNAKE_CASE_ : Optional[int] = 48 SCREAMING_SNAKE_CASE_ : Optional[int] = 24 elif checkpoint == "large": SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48 SCREAMING_SNAKE_CASE_ : Optional[int] = 48 SCREAMING_SNAKE_CASE_ : int = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig( hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , ) return config @torch.no_grad() def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a ) SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict( __a , hidden_size=decoder_config.hidden_size ) SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel.from_pretrained('''t5-base''' ) SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__a ) if len(__a ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(__a ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__a ) # check we can do a forward pass SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits if logits.shape != (8, 1, 20_48): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' ) SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a ) # set the appropriate bos/pad token ids SCREAMING_SNAKE_CASE_ : str = 20_48 SCREAMING_SNAKE_CASE_ : List[Any] = 20_48 # set other default generation config params SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate ) SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0 if pytorch_dump_folder is not None: Path(__a ).mkdir(exist_ok=__a ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(__a ) processor.save_pretrained(__a ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(__a ) processor.push_to_hub(__a ) if __name__ == "__main__": UpperCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) UpperCAmelCase_ : Dict = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
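# Illustrative CLI invocation of the conversion script above; the script file
# name is an assumption, the flags come from the argparse definition:
#
#   python convert_musicgen.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu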
318
0
from collections import defaultdict


def dfs(start: int) -> int:
    """Depth-first traversal that records nodes whose subtree has even size."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # A node's subtree parity is known only after all children are visited.
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
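# A self-contained re-check of the same idea on a tiny example (a 4-node path,
# chosen purely for illustration): cutting edge (2, 3) leaves two components of
# size 2, so exactly one edge is removable.
from collections import defaultdict as _dd

def count_even_cuts(edge_list: list) -> int:
    adj = _dd(list)
    for a, b in edge_list:
        adj[a].append(b)
        adj[b].append(a)
    seen: set = set()
    removable = 0

    def walk(node: int) -> int:
        nonlocal removable
        seen.add(node)
        size = 1
        for nxt in adj[node]:
            if nxt not in seen:
                size += walk(nxt)
        if size % 2 == 0:
            removable += 1
        return size

    walk(1)
    return removable - 1  # the root itself is not below a removable edge

assert count_even_cuts([(1, 2), (2, 3), (3, 4)]) == 1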
360
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def _A (__a ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b def _A (__a ) -> np.ndarray: """simple docstring""" return (gray > 1_27) & (gray <= 2_55) def _A (__a , __a ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE_ : Union[str, Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE_ : Optional[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 ) return output if __name__ == "__main__": # read original image UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path)) # kernel to be applied UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
318
0
"""simple docstring""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger("""transformers.models.encodec""") UpperCAmelCase_ = { 'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited', 'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size', 'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed', 'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg', } UpperCAmelCase_ = { 'encoder.model.0.conv.conv': 'encoder.layers.0.conv', 'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv', 'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv', 'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv', 'encoder.model.3.conv.conv': 'encoder.layers.3.conv', 'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv', 'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv', 'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv', 'encoder.model.6.conv.conv': 'encoder.layers.6.conv', 'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv', 'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv', 'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv', 'encoder.model.9.conv.conv': 'encoder.layers.9.conv', 'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv', 'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv', 'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv', 'encoder.model.12.conv.conv': 'encoder.layers.12.conv', 'encoder.model.13.lstm': 'encoder.layers.13.lstm', 'encoder.model.15.conv.conv': 'encoder.layers.15.conv', } UpperCAmelCase_ = { 'encoder.model.0.conv.norm': 'encoder.layers.0.norm', 'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm', 'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm', 'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm', 'encoder.model.3.conv.norm': 'encoder.layers.3.norm', 'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm', 'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm', 'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm', 'encoder.model.6.conv.norm': 'encoder.layers.6.norm', 'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm', 'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm', 'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm', 'encoder.model.9.conv.norm': 'encoder.layers.9.norm', 'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm', 'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm', 'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm', 'encoder.model.12.conv.norm': 'encoder.layers.12.norm', 'encoder.model.15.conv.norm': 'encoder.layers.15.norm', } UpperCAmelCase_ = { 'decoder.model.0.conv.conv': 'decoder.layers.0.conv', 'decoder.model.1.lstm': 'decoder.layers.1.lstm', 'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv', 'decoder.model.4.block.1.conv.conv': 
'decoder.layers.4.block.1.conv', 'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv', 'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv', 'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv', 'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv', 'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv', 'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv', 'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv', 'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv', 'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv', 'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv', 'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv', 'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv', 'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv', 'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv', 'decoder.model.15.conv.conv': 'decoder.layers.15.conv', } UpperCAmelCase_ = { 'decoder.model.0.conv.norm': 'decoder.layers.0.norm', 'decoder.model.3.convtr.norm': 'decoder.layers.3.norm', 'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm', 'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm', 'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm', 'decoder.model.6.convtr.norm': 'decoder.layers.6.norm', 'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm', 'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm', 'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm', 'decoder.model.9.convtr.norm': 'decoder.layers.9.norm', 'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm', 'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm', 'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm', 'decoder.model.12.convtr.norm': 'decoder.layers.12.norm', 'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm', 'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm', 'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm', 'decoder.model.15.conv.norm': 'decoder.layers.15.norm', } UpperCAmelCase_ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } UpperCAmelCase_ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } UpperCAmelCase_ = [] UpperCAmelCase_ = [] def _A (__a , __a , __a , __a , __a ) -> Optional[int]: """simple docstring""" for attribute in key.split('''.''' ): SCREAMING_SNAKE_CASE_ : Tuple = getattr(a__ , a__ ) if weight_type is not None: SCREAMING_SNAKE_CASE_ : int = getattr(a__ , a__ ).shape else: SCREAMING_SNAKE_CASE_ : Optional[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": SCREAMING_SNAKE_CASE_ : Optional[Any] = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE_ : Any = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE_ : List[str] = value elif weight_type == "bias": SCREAMING_SNAKE_CASE_ : int = value elif weight_type == "running_mean": SCREAMING_SNAKE_CASE_ : str = value elif weight_type == "running_var": SCREAMING_SNAKE_CASE_ : int = value elif weight_type == "num_batches_tracked": SCREAMING_SNAKE_CASE_ : str = value elif weight_type == "weight_ih_l0": SCREAMING_SNAKE_CASE_ : str = value elif weight_type == "weight_hh_l0": SCREAMING_SNAKE_CASE_ : List[Any] = value elif weight_type == "bias_ih_l0": SCREAMING_SNAKE_CASE_ : List[str] = value elif weight_type == "bias_hh_l0": SCREAMING_SNAKE_CASE_ : Any = value elif weight_type == "weight_ih_l1": SCREAMING_SNAKE_CASE_ : Union[str, Any] = value elif weight_type == "weight_hh_l1": SCREAMING_SNAKE_CASE_ : Optional[int] = value elif weight_type == "bias_ih_l1": SCREAMING_SNAKE_CASE_ : Any = value elif weight_type == "bias_hh_l1": SCREAMING_SNAKE_CASE_ : Optional[Any] = value else: SCREAMING_SNAKE_CASE_ : int = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def _A (__a , __a ) -> Union[str, Any]: """simple docstring""" for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _A (__a , __a , __a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [] if model_name == "encodec_24khz" or "encodec_32khz": SCREAMING_SNAKE_CASE_ : Dict = MAPPING_24K elif model_name == "encodec_48khz": SCREAMING_SNAKE_CASE_ : List[str] = MAPPING_48K else: raise ValueError(f'Unsupported model: {model_name}' ) for name, value in orig_dict.items(): if should_ignore(a__ , a__ ): logger.info(f'{name} was ignored' ) continue SCREAMING_SNAKE_CASE_ : str = False for key, mapped_key in MAPPING.items(): if "*" in key: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = key.split('''.*.''' ) if prefix in name and suffix in name: SCREAMING_SNAKE_CASE_ : List[str] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue SCREAMING_SNAKE_CASE_ : Optional[int] = True if "*" in mapped_key: SCREAMING_SNAKE_CASE_ : Optional[int] = name.split(a__ )[0].split('''.''' )[-2] SCREAMING_SNAKE_CASE_ : Optional[int] = mapped_key.replace('''*''' , a__ ) if "weight_g" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = '''weight_g''' elif "weight_v" in name: SCREAMING_SNAKE_CASE_ : Optional[Any] = '''weight_v''' elif "weight_ih_l0" in name: SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''weight_ih_l0''' elif "weight_hh_l0" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = '''weight_hh_l0''' elif "bias_ih_l0" in name: SCREAMING_SNAKE_CASE_ : Optional[Any] = '''bias_ih_l0''' elif "bias_hh_l0" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = '''bias_hh_l0''' elif "weight_ih_l1" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = '''weight_ih_l1''' elif "weight_hh_l1" in name: SCREAMING_SNAKE_CASE_ : List[str] = '''weight_hh_l1''' elif "bias_ih_l1" in name: SCREAMING_SNAKE_CASE_ : 
Optional[Any] = '''bias_ih_l1''' elif "bias_hh_l1" in name: SCREAMING_SNAKE_CASE_ : Dict = '''bias_hh_l1''' elif "bias" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = '''bias''' elif "weight" in name: SCREAMING_SNAKE_CASE_ : Tuple = '''weight''' elif "running_mean" in name: SCREAMING_SNAKE_CASE_ : List[str] = '''running_mean''' elif "running_var" in name: SCREAMING_SNAKE_CASE_ : int = '''running_var''' elif "num_batches_tracked" in name: SCREAMING_SNAKE_CASE_ : Tuple = '''num_batches_tracked''' else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = None set_recursively(a__ , a__ , a__ , a__ , a__ ) continue if not is_used: unused_weights.append(a__ ) logger.warning(f'Unused weights: {unused_weights}' ) @torch.no_grad() def _A (__a , __a , __a , __a=None , __a=None , ) -> Optional[Any]: """simple docstring""" if config_path is not None: SCREAMING_SNAKE_CASE_ : Tuple = EncodecConfig.from_pretrained(a__ ) else: SCREAMING_SNAKE_CASE_ : Any = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": SCREAMING_SNAKE_CASE_ : Tuple = [8, 5, 4, 4] SCREAMING_SNAKE_CASE_ : List[str] = [2.2] SCREAMING_SNAKE_CASE_ : Dict = 64 SCREAMING_SNAKE_CASE_ : int = 3_20_00 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 20_48 SCREAMING_SNAKE_CASE_ : Any = False SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : Optional[int] = False elif model_name == "encodec_48khz": SCREAMING_SNAKE_CASE_ : List[str] = [8, 5, 4, 2] SCREAMING_SNAKE_CASE_ : Optional[int] = [3.0, 6.0, 12.0, 24.0] SCREAMING_SNAKE_CASE_ : Optional[int] = 4_80_00 SCREAMING_SNAKE_CASE_ : int = 2 SCREAMING_SNAKE_CASE_ : Any = False SCREAMING_SNAKE_CASE_ : List[str] = '''time_group_norm''' SCREAMING_SNAKE_CASE_ : Optional[int] = True SCREAMING_SNAKE_CASE_ : Optional[int] = 1.0 SCREAMING_SNAKE_CASE_ : List[Any] = 0.01 else: raise ValueError(f'Unknown model name: {model_name}' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = EncodecModel(a__ ) SCREAMING_SNAKE_CASE_ : List[str] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(a__ ) SCREAMING_SNAKE_CASE_ : int = torch.load(a__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights SCREAMING_SNAKE_CASE_ : Optional[Any] = original_checkpoint['''best_state'''] recursively_load_weights(a__ , a__ , a__ ) model.save_pretrained(a__ ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(a__ ) model.push_to_hub(a__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--model""", default="""encodec_24khz""", type=str, help="""The model to convert. 
Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) UpperCAmelCase_ = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
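# Illustrative CLI invocation of the converter above; the script file name and
# local checkpoint path are assumptions, the flags come from the argparse
# definition:
#
#   python convert_encodec_checkpoint.py --model encodec_24khz \
#       --checkpoint_path encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz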
361
"""simple docstring""" from collections import defaultdict def _A (__a , __a ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip() SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip() # Remove whitespace SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(__a ) != len(__a ): return False # Default values for count should be 0 SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a ) # For each character in input strings, # increment count in the corresponding for i in range(len(__a ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ : Any = input("""Enter the first string """).strip() UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip() UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
318
0
"""simple docstring""" from __future__ import annotations def _A (__a , __a = None , __a = None ) -> None: """simple docstring""" if start is None: SCREAMING_SNAKE_CASE_ : List[str] = 0 if end is None: SCREAMING_SNAKE_CASE_ : int = len(lowercase_ ) - 1 if start >= end: return SCREAMING_SNAKE_CASE_ : Any = (start + end) // 2 slowsort(lowercase_ , lowercase_ , lowercase_ ) slowsort(lowercase_ , mid + 1 , lowercase_ ) if sequence[end] < sequence[mid]: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = sequence[mid], sequence[end] slowsort(lowercase_ , lowercase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
362
"""simple docstring""" import argparse from collections import defaultdict import yaml UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml""" def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = defaultdict(__a ) for doc in model_doc: counts[doc["local"]] += 1 SCREAMING_SNAKE_CASE_ : List[Any] = [key for key, value in counts.items() if value > 1] SCREAMING_SNAKE_CASE_ : int = [] for duplicate_key in duplicates: SCREAMING_SNAKE_CASE_ : List[str] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(__a ) > 1: raise ValueError( f'{duplicate_key} is present several times in the documentation table of content at ' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(__a , key=lambda __a : s["title"].lower() ) def _A (__a=False ) -> Tuple: """simple docstring""" with open(__a , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = yaml.safe_load(f.read() ) # Get to the API doc SCREAMING_SNAKE_CASE_ : Any = 0 while content[api_idx]["title"] != "API": api_idx += 1 SCREAMING_SNAKE_CASE_ : str = content[api_idx]['''sections'''] # Then to the model doc SCREAMING_SNAKE_CASE_ : List[Any] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 SCREAMING_SNAKE_CASE_ : Optional[int] = api_doc[model_idx]['''sections'''] SCREAMING_SNAKE_CASE_ : str = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section] SCREAMING_SNAKE_CASE_ : Optional[Any] = False for idx, modality_doc in modalities_docs: SCREAMING_SNAKE_CASE_ : List[str] = modality_doc['''sections'''] SCREAMING_SNAKE_CASE_ : Union[str, Any] = clean_model_doc_toc(__a ) if old_modality_doc != new_modality_doc: SCREAMING_SNAKE_CASE_ : str = True if overwrite: SCREAMING_SNAKE_CASE_ : Optional[int] = new_modality_doc if diff: if overwrite: SCREAMING_SNAKE_CASE_ : List[Any] = model_doc SCREAMING_SNAKE_CASE_ : int = api_doc with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Tuple = parser.parse_args() check_model_doc(args.fix_and_overwrite)
318
0
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu UpperCAmelCase_ : str = [ """EAGER""", """AOT_EAGER""", """INDUCTOR""", """NVFUSER""", """AOT_NVFUSER""", """AOT_CUDAGRAPHS""", """OFI""", """FX2TRT""", """ONNXRT""", """IPEX""", ] def _A (__a , __a=None , __a=None , __a=None ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = True while ask_again: SCREAMING_SNAKE_CASE_ : Any = input(UpperCamelCase__ ) try: if default is not None and len(UpperCamelCase__ ) == 0: return default return convert_value(UpperCamelCase__ ) if convert_value is not None else result except Exception: if error_message is not None: print(UpperCamelCase__ ) def _A (__a , __a=[] , __a=None , __a=0 ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = BulletMenu(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = menu.run(default_choice=UpperCamelCase__ ) return convert_value(UpperCamelCase__ ) if convert_value is not None else result def _A (__a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = int(UpperCamelCase__ ) return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] ) def _A (__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = int(UpperCamelCase__ ) return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] ) def _A (__a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = int(UpperCamelCase__ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def _A (__a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = int(UpperCamelCase__ ) return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] ) def _A (__a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = int(UpperCamelCase__ ) return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] ) def _A (__a ) -> List[Any]: """simple docstring""" return {"yes": True, "no": False}[value.lower()] class lowerCAmelCase__ ( argparse.RawDescriptionHelpFormatter ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = super()._format_usage(_a , _a , _a , _a) SCREAMING_SNAKE_CASE_ : List[Any] = usage.replace('''<command> [<args>] ''' , '''''') return usage
363
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
318
0
"""simple docstring""" from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging UpperCAmelCase_ : Tuple = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self : Any , lowercase_ : Tuple = 101): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = length def __len__( self : Dict): '''simple docstring''' return self.length def __getitem__( self : Tuple , lowercase_ : int): '''simple docstring''' return i class lowerCAmelCase__ : '''simple docstring''' def __call__( self : Any , lowercase_ : Tuple): '''simple docstring''' return {"input_ids": torch.tensor(_SCREAMING_SNAKE_CASE), "labels": torch.tensor(_SCREAMING_SNAKE_CASE)} class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : int): '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. SCREAMING_SNAKE_CASE_ : Any = nn.Linear(120 , 80) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str , lowercase_ : Optional[Any]=None): '''simple docstring''' if labels is not None: return torch.tensor(0.0 , device=input_ids.device), input_ids else: return input_ids class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @require_torch_neuroncore def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split() SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE_ : Tuple = F'--output_dir {output_dir}'.split() SCREAMING_SNAKE_CASE_ : Tuple = ["torchrun"] + distributed_args + args execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @require_torch_multi_gpu def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split() SCREAMING_SNAKE_CASE_ : Any = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE_ : List[Any] = F'--output_dir {output_dir}'.split() SCREAMING_SNAKE_CASE_ : str = ["torchrun"] + distributed_args + args execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py UpperCAmelCase_ : int = HfArgumentParser((TrainingArguments,)) UpperCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0] logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' 
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. (this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: UpperCAmelCase_ : Tuple = DummyDataset(dataset_length) def _A (__a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = list(range(len(_a ) ) ) SCREAMING_SNAKE_CASE_ : Tuple = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( '''Predictions and/or labels do not match expected results:\n - predictions: ''' f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' ) return {"success": success} UpperCAmelCase_ : Union[str, Any] = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) UpperCAmelCase_ : Dict = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) UpperCAmelCase_ : Any = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) UpperCAmelCase_ : List[str] = 2 UpperCAmelCase_ : List[Any] = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) UpperCAmelCase_ : Union[str, Any] = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) UpperCAmelCase_ : List[Any] = None
364
"""simple docstring""" from __future__ import annotations UpperCAmelCase_ : List[str] = list[list[int]] # assigning initial values to the grid UpperCAmelCase_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution UpperCAmelCase_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _A (__a , __a , __a , __a ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _A (__a ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _A (__a ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__a ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(__a , __a , __a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = digit if sudoku(__a ) is not None: return grid SCREAMING_SNAKE_CASE_ : Any = 0 return None def _A (__a ) -> None: """simple docstring""" for row in grid: for cell in row: print(__a , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") UpperCAmelCase_ : str = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
318
0
"""simple docstring""" from __future__ import annotations def _A (__a , __a , __a ) -> List[Any]: """simple docstring""" if (voltage, current, resistance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance < 0: raise ValueError('''Resistance cannot be negative''' ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
365
"""simple docstring""" from itertools import permutations def _A (__a ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False SCREAMING_SNAKE_CASE_ : List[str] = [7, 11, 13, 17] for i, test in enumerate(__a ): if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def _A (__a = 10 ) -> int: """simple docstring""" return sum( int(''''''.join(map(__a , __a ) ) ) for num in permutations(range(__a ) ) if is_substring_divisible(__a ) ) if __name__ == "__main__": print(f'''{solution() = }''')
318
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , lowercase_ : Optional[Any] , lowercase_ : Optional[int]=13 , lowercase_ : List[str]=3 , lowercase_ : Tuple=224 , lowercase_ : Optional[int]=30 , lowercase_ : Dict=400 , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=None , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=[0.5, 0.5, 0.5] , lowercase_ : Any=[0.5, 0.5, 0.5] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 18} SCREAMING_SNAKE_CASE_ : List[Any] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : str = num_channels SCREAMING_SNAKE_CASE_ : List[str] = image_size SCREAMING_SNAKE_CASE_ : int = min_resolution SCREAMING_SNAKE_CASE_ : Optional[int] = max_resolution SCREAMING_SNAKE_CASE_ : Optional[Any] = do_resize SCREAMING_SNAKE_CASE_ : Any = size SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_normalize SCREAMING_SNAKE_CASE_ : int = image_mean SCREAMING_SNAKE_CASE_ : int = image_std def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ViTImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = EfficientFormerImageProcessorTester(self) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''')) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ : Optional[int] = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(lowerCAmelCase__ , 
return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_ : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor(lowerCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE_ : int = image_processor(lowerCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
366
"""simple docstring""" UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5 def _A (__a , __a , __a = g ) -> float: """simple docstring""" if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
318
0
"""simple docstring""" import importlib.metadata import operator import re import sys from typing import Optional from packaging import version UpperCAmelCase_ : Dict = { '<': operator.lt, '<=': operator.le, '==': operator.eq, '!=': operator.ne, '>=': operator.ge, '>': operator.gt, } def _A (__a , __a , __a , __a , __a , __a ) -> str: """simple docstring""" if got_ver is None or want_ver is None: raise ValueError( f'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider' f' reinstalling {pkg}.' ) if not ops[op](version.parse(UpperCamelCase__ ) , version.parse(UpperCamelCase__ ) ): raise ImportError( f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' ) def _A (__a , __a = None ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = f'\n{hint}' if hint is not None else '''''' # non-versioned check if re.match(R'''^[\w_\-\d]+$''' , UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = requirement, None, None else: SCREAMING_SNAKE_CASE_ : int = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , UpperCamelCase__ ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but''' f' got {requirement}' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = match[0] SCREAMING_SNAKE_CASE_ : List[Any] = want_full.split(''',''' ) # there could be multiple requirements SCREAMING_SNAKE_CASE_ : Dict = {} for w in want_range: SCREAMING_SNAKE_CASE_ : Optional[Any] = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , UpperCamelCase__ ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,''' f' but got {requirement}' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = match[0] SCREAMING_SNAKE_CASE_ : Optional[int] = want_ver if op not in ops: raise ValueError(f'{requirement}: need one of {list(ops.keys() )}, but got {op}' ) # special case if pkg == "python": SCREAMING_SNAKE_CASE_ : str = '''.'''.join([str(UpperCamelCase__ ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return # check if any version is installed try: SCREAMING_SNAKE_CASE_ : List[Any] = importlib.metadata.version(UpperCamelCase__ ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f'The \'{requirement}\' distribution was not found and is required by this application. {hint}' ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _A (__a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main''' return require_version(UpperCamelCase__ , UpperCamelCase__ )
367
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _A (__a ) -> Union[str, Any]: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__a ) def _A (__a ) -> Any: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__a , id=__a )
318
0
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _A (__a , __a , __a = False ) -> list[float]: """simple docstring""" if radian_mode: return [magnitude * cos(__lowerCamelCase ), magnitude * sin(__lowerCamelCase )] return [magnitude * cos(radians(__lowerCamelCase ) ), magnitude * sin(radians(__lowerCamelCase ) )] def _A (__a , __a , __a = 10**-1 ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = cross(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE_ : List[Any] = sum(__lowerCamelCase ) return abs(__lowerCamelCase ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase_ : Dict = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase_ : Optional[int] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase_ : List[str] = array( [ polar_force(30 * 9.8_1, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase_ : Optional[Any] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase_ : int = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) UpperCAmelCase_ : List[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
368
"""simple docstring""" import argparse import os import re import packaging.version UpperCAmelCase_ : Any = """examples/""" UpperCAmelCase_ : Optional[int] = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCAmelCase_ : List[Any] = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCAmelCase_ : Optional[int] = """README.md""" def _A (__a , __a , __a ) -> int: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern] SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a ) SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a ) with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(__a ) def _A (__a ) -> int: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' ) def _A (__a , __a=False ) -> List[str]: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a , __a , __a ) if not patch: update_version_in_examples(__a ) def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?''' with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Tuple = f.readlines() # Find the start of the list. SCREAMING_SNAKE_CASE_ : Tuple = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Dict = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(__a ) def _A () -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: SCREAMING_SNAKE_CASE_ : Any = f.read() SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def _A (__a=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version elif patch: SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version print(f'Updating version to {version}.' ) global_version_update(__a , patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _A () -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_version() SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0' SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version # Check with the user we got that right. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version print(f'Updating version to {version}.' ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCAmelCase_ : int = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
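# Pattern sanity check (a sketch, assuming only the stdlib; the version string
# below is made up for the demo):
#
#     >>> sample = '__version__ = "4.26.0.dev0"\n'
#     >>> REPLACE_PATTERNS["init"][0].search(sample).groups()[0]
#     '4.26.0.dev0'
#
# This is exactly the lookup get_version() performs on src/transformers/__init__.py.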
318
0
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase_ : Tuple = logging.get_logger(__name__) def _A (__a , __a ) -> Optional[int]: """simple docstring""" def run_func(__a ): @wraps(_snake_case ) def run_in_eager_mode(*__a , **__a ): return func(*_snake_case , **_snake_case ) @wraps(_snake_case ) @tf.function(experimental_compile=_snake_case ) def run_in_graph_mode(*__a , **__a ): return func(*_snake_case , **_snake_case ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _A (__a , __a , __a ) -> ["tf.Tensor"]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = random.Random() SCREAMING_SNAKE_CASE_ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(_snake_case , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = 4_2 __UpperCamelCase = 4_2 __UpperCamelCase = "TensorFlow" @property def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return tf.__version__ def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''') SCREAMING_SNAKE_CASE_ : Any = self._prepare_inference_func(_a , _a , _a) return self._measure_speed(_inference) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''') SCREAMING_SNAKE_CASE_ : int = self._prepare_train_func(_a , _a , _a) return self._measure_speed(_train) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : str): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _a) SCREAMING_SNAKE_CASE_ : Dict = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''') SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_inference_func(_a , _a , _a) return self._measure_memory(_inference) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[int]): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _a) SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''') SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_train_func(_a , _a , _a) return self._measure_memory(_train) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''') SCREAMING_SNAKE_CASE_ : Dict = ( hasattr(_a , '''architectures''') and isinstance(config.architectures , _a) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: SCREAMING_SNAKE_CASE_ : Optional[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model SCREAMING_SNAKE_CASE_ : Dict = __import__('''transformers''' , fromlist=[model_class]) SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_a , _a) SCREAMING_SNAKE_CASE_ : List[Any] = model_cls(_a) except ImportError: raise ImportError( F'{model_class} does not exist. If you just want to test the pretrained model, you might want to' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''') else: SCREAMING_SNAKE_CASE_ : Optional[int] = TF_MODEL_MAPPING[config.__class__](_a) # encoder-decoder has vocab size saved differently SCREAMING_SNAKE_CASE_ : Union[str, Any] = config.vocab_size if hasattr(_a , '''vocab_size''') else config.encoder.vocab_size SCREAMING_SNAKE_CASE_ : int = random_input_ids(_a , _a , _a) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_decoder_forward(): return model(_a , decoder_input_ids=_a , training=_a) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_forward(): return model(_a , training=_a) SCREAMING_SNAKE_CASE_ : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''') if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( hasattr(_a , '''architectures''') and isinstance(config.architectures , _a) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: SCREAMING_SNAKE_CASE_ : List[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model SCREAMING_SNAKE_CASE_ : List[Any] = __import__('''transformers''' , fromlist=[model_class]) SCREAMING_SNAKE_CASE_ : str = getattr(_a , _a) SCREAMING_SNAKE_CASE_ : Optional[Any] = model_cls(_a) except ImportError: raise ImportError( F'{model_class} does not exist. 
If you just want to test the pretrained model, you might want to' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''') else: SCREAMING_SNAKE_CASE_ : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_a) # encoder-decoder has vocab size saved differently SCREAMING_SNAKE_CASE_ : Any = config.vocab_size if hasattr(_a , '''vocab_size''') else config.encoder.vocab_size SCREAMING_SNAKE_CASE_ : Any = random_input_ids(_a , _a , _a) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_decoder_train(): SCREAMING_SNAKE_CASE_ : Optional[int] = model(_a , decoder_input_ids=_a , labels=_a , training=_a)[0] SCREAMING_SNAKE_CASE_ : int = tf.gradients(_a , model.trainable_variables) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_train(): SCREAMING_SNAKE_CASE_ : List[str] = model(_a , labels=_a , training=_a)[0] SCREAMING_SNAKE_CASE_ : Dict = tf.gradients(_a , model.trainable_variables) return gradients SCREAMING_SNAKE_CASE_ : Optional[int] = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Optional[Any]): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''') timeit.repeat(_a , repeat=1 , number=5) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average SCREAMING_SNAKE_CASE_ : int = timeit.repeat( _a , repeat=self.args.repeat , number=10 , ) return min(_a) / 10.0 except ResourceExhaustedError as e: self.print_fn(F'Doesn\'t fit on GPU. {e}') def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Union[str, Any]): '''simple docstring''' logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''') with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''') SCREAMING_SNAKE_CASE_ : List[Any] = start_memory_tracing('''transformers''') if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''') elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''') SCREAMING_SNAKE_CASE_ : List[str] = "N/A" else: logger.info( '''Measuring total GPU usage on GPU device. 
Make sure to not have additional processes''' ''' running on the same GPU.''') # init nvml nvml.nvmlInit() func() SCREAMING_SNAKE_CASE_ : Tuple = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) SCREAMING_SNAKE_CASE_ : Dict = nvml.nvmlDeviceGetMemoryInfo(_a) SCREAMING_SNAKE_CASE_ : Optional[int] = meminfo.used SCREAMING_SNAKE_CASE_ : List[str] = Memory(_a) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = None else: SCREAMING_SNAKE_CASE_ : List[str] = measure_peak_memory_cpu(_a) SCREAMING_SNAKE_CASE_ : List[Any] = Memory(_a) if isinstance(_a , _a) else memory_bytes if self.args.trace_memory_line_by_line: SCREAMING_SNAKE_CASE_ : List[str] = stop_memory_tracing(_a) if memory is None: SCREAMING_SNAKE_CASE_ : Optional[int] = summary.total else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F'Doesn\'t fit on GPU. {e}') return "N/A", None
369
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _A (__a , __a , __a=1e-12 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T return jnp.matmul(__a , norm_emb_a.T ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config) SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype) SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param( '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,)) def __call__( self : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs SCREAMING_SNAKE_CASE_ : Tuple = 0.0 SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_) # Use a lower threshold if an image has any special care concept SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01 SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = CLIPConfig __UpperCamelCase = "clip_input" __UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ): '''simple docstring''' if input_shape is None: SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3) SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_) super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , 
lowercase_ : Tuple , lowercase_ : FrozenDict = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng} SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params'''] return random_params def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1)) return self.module.apply( {'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
318
0
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex UpperCAmelCase_ : Any = logging.getLogger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = False def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int): '''simple docstring''' if not self.initialized: SCREAMING_SNAKE_CASE_ : int = RagRetriever( _a , question_encoder_tokenizer=_a , generator_tokenizer=_a , index=_a , init_retrieval=_a , ) SCREAMING_SNAKE_CASE_ : str = True def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' self.retriever.index.init_index() def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.retriever._main_retrieve(_a , _a) return doc_ids, retrieved_doc_embeds class lowerCAmelCase__ ( _a ): '''simple docstring''' def __init__( self : str , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : int=None): '''simple docstring''' if index is not None and index.is_initialized() and len(_a) > 0: raise ValueError( '''When using Ray for distributed fine-tuning, ''' '''you\'ll need to provide the paths instead, ''' '''as the dataset and the index are loaded ''' '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''') super().__init__( _a , question_encoder_tokenizer=_a , generator_tokenizer=_a , index=_a , init_retrieval=_a , ) SCREAMING_SNAKE_CASE_ : List[Any] = retrieval_workers if len(self.retrieval_workers) > 0: ray.get( [ worker.create_rag_retriever.remote(_a , _a , _a , _a) for worker in self.retrieval_workers ]) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' logger.info('''initializing retrieval''') if len(self.retrieval_workers) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : int): '''simple docstring''' if len(self.retrieval_workers) > 0: # Select a random retrieval actor. 
SCREAMING_SNAKE_CASE_ : int = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)] SCREAMING_SNAKE_CASE_ : Tuple = ray.get(random_worker.retrieve.remote(_a , _a)) else: SCREAMING_SNAKE_CASE_ : Tuple = self._main_retrieve(_a , _a) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_a) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any]=None , **lowercase_ : Union[str, Any]): '''simple docstring''' return super(_a , cls).get_tokenizers(_a , _a , **_a) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('''config''' , _a) or RagConfig.from_pretrained(_a , **_a) SCREAMING_SNAKE_CASE_ : int = RagTokenizer.from_pretrained(_a , config=_a) SCREAMING_SNAKE_CASE_ : str = rag_tokenizer.question_encoder SCREAMING_SNAKE_CASE_ : Optional[Any] = rag_tokenizer.generator if indexed_dataset is not None: SCREAMING_SNAKE_CASE_ : Tuple = "custom" SCREAMING_SNAKE_CASE_ : Optional[int] = CustomHFIndex(config.retrieval_vector_size , _a) else: SCREAMING_SNAKE_CASE_ : str = cls._build_index(_a) return cls( _a , question_encoder_tokenizer=_a , generator_tokenizer=_a , retrieval_workers=_a , index=_a , )
370
"""simple docstring""" from __future__ import annotations import queue class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = data SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None def _A () -> TreeNode: """simple docstring""" print('''\n********Press N to stop entering at any point of time********\n''' ) SCREAMING_SNAKE_CASE_ : List[Any] = input('''Enter the value of the root node: ''' ).strip().lower() SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE_ : Union[str, Any] = TreeNode(int(__a ) ) q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Optional[int] = q.get() SCREAMING_SNAKE_CASE_ : List[str] = f'Enter the left node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : Optional[int] = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : List[str] = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = left_node q.put(__a ) SCREAMING_SNAKE_CASE_ : str = f'Enter the right node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : str = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : Any = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : int = right_node q.put(__a ) raise def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Tuple = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : str = [] while not q.empty(): SCREAMING_SNAKE_CASE_ : List[str] = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__a ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Union[str, Any] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE_ : Tuple = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE_ : str = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Any = node while n or stack: while n: stack.append(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.left SCREAMING_SNAKE_CASE_ : Any = stack.pop() 
print(n.data , end=''',''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = [], [] SCREAMING_SNAKE_CASE_ : List[Any] = node stacka.append(__a ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE_ : List[str] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__a ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def _A (__a = "" , __a=50 , __a="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 ) return f'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase_ : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
318
0
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( _a , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = BloomTokenizerFast __UpperCamelCase = BloomTokenizerFast __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = "tokenizer_file" __UpperCamelCase = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE_ : List[str] = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''') tokenizer.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : Dict , **lowercase_ : Dict): '''simple docstring''' kwargs.update(self.special_tokens_map) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""] SCREAMING_SNAKE_CASE_ : str = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] SCREAMING_SNAKE_CASE_ : int = tokenizer.batch_encode_plus(snake_case_)["""input_ids"""] self.assertListEqual(snake_case_ , snake_case_) SCREAMING_SNAKE_CASE_ : str = tokenizer.batch_decode(snake_case_) self.assertListEqual(snake_case_ , snake_case_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str=6): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'): SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input SCREAMING_SNAKE_CASE_ : str = """This is a simple input""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE_ : List[str] = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE_ : str = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests try: tokenizer_r.encode(snake_case_ , max_length=snake_case_) tokenizer_r.encode_plus(snake_case_ , max_length=snake_case_) tokenizer_r.batch_encode_plus(snake_case_ , max_length=snake_case_) tokenizer_r.encode(snake_case_ , max_length=snake_case_) tokenizer_r.batch_encode_plus(snake_case_ , max_length=snake_case_) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = None # Hotfixing padding = None self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding='''max_length''') # Simple input self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''') # Simple input self.assertRaises( snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' , ) # Pair input self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding='''max_length''') # Pair input 
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''') # Pair input self.assertRaises( snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Any = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=snake_case_) SCREAMING_SNAKE_CASE_ : Dict = next(iter(snake_case_))["""premise"""] # pick up one data SCREAMING_SNAKE_CASE_ : Tuple = list(sample_data.values()) SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(map(tokenizer.encode , snake_case_)) SCREAMING_SNAKE_CASE_ : Optional[Any] = [tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_) for x in output_tokens] self.assertListEqual(snake_case_ , snake_case_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
371
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx" def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Tuple = np.array( [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 
0.76_94_25_15]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array( [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions() SCREAMING_SNAKE_CASE_ : Optional[int] = False return options def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''') SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : int = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : List[str] = np.array( [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
318
0
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : int = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCAmelCase__ ( __lowerCamelCase ): '''simple docstring''' __UpperCamelCase = """gptj""" __UpperCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : str , lowercase_ : Tuple=50400 , lowercase_ : Tuple=2048 , lowercase_ : Any=4096 , lowercase_ : Dict=28 , lowercase_ : Tuple=16 , lowercase_ : Optional[int]=64 , lowercase_ : str=None , lowercase_ : List[Any]="gelu_new" , lowercase_ : int=0.0 , lowercase_ : Dict=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : Tuple=1e-5 , lowercase_ : List[str]=0.02 , lowercase_ : Any=True , lowercase_ : List[Any]=50256 , lowercase_ : Union[str, Any]=50256 , lowercase_ : Dict=False , **lowercase_ : Union[str, Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size SCREAMING_SNAKE_CASE_ : str = n_positions SCREAMING_SNAKE_CASE_ : Optional[Any] = n_embd SCREAMING_SNAKE_CASE_ : List[str] = n_layer SCREAMING_SNAKE_CASE_ : Optional[Any] = n_head SCREAMING_SNAKE_CASE_ : List[Any] = n_inner SCREAMING_SNAKE_CASE_ : List[str] = rotary_dim SCREAMING_SNAKE_CASE_ : Dict = activation_function SCREAMING_SNAKE_CASE_ : str = resid_pdrop SCREAMING_SNAKE_CASE_ : Optional[Any] = embd_pdrop SCREAMING_SNAKE_CASE_ : Optional[Any] = attn_pdrop SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE_ : List[Any] = use_cache SCREAMING_SNAKE_CASE_ : str = bos_token_id SCREAMING_SNAKE_CASE_ : Dict = eos_token_id super().__init__( bos_token_id=__lowercase , eos_token_id=__lowercase , tie_word_embeddings=__lowercase , **__lowercase) class lowerCAmelCase__ ( __lowerCamelCase ): '''simple docstring''' def __init__( self : List[str] , lowercase_ : PretrainedConfig , lowercase_ : str = "default" , lowercase_ : List[PatchingSpec] = None , lowercase_ : bool = False , ): '''simple docstring''' super().__init__(__lowercase , task=__lowercase , patching_specs=__lowercase , use_past=__lowercase) if not getattr(self._config , '''pad_token_id''' , __lowercase): # TODO: how to do that better? 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}}) if self.use_past: self.fill_with_past_key_values_(__lowercase , direction='''inputs''') SCREAMING_SNAKE_CASE_ : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: SCREAMING_SNAKE_CASE_ : Optional[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' return self._config.n_layer @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return self._config.n_head def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = super(__lowercase , self).generate_dummy_inputs( __lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE_ : Optional[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch SCREAMING_SNAKE_CASE_ : Union[str, Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE_ : Tuple = seqlen + 2 SCREAMING_SNAKE_CASE_ : Dict = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE_ : Any = [ (torch.zeros(__lowercase), torch.zeros(__lowercase)) for _ in range(self.num_layers) ] SCREAMING_SNAKE_CASE_ : List[str] = common_inputs['''attention_mask'''] if self.use_past: SCREAMING_SNAKE_CASE_ : int = ordered_inputs['''attention_mask'''].dtype SCREAMING_SNAKE_CASE_ : Dict = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__lowercase , __lowercase , dtype=__lowercase)] , dim=1) return ordered_inputs @property def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' return 13
350
"""simple docstring""" from scipy.stats import pearsonr import datasets UpperCAmelCase_ : List[Any] = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ UpperCAmelCase_ : Optional[int] = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ UpperCAmelCase_ : Tuple = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False): '''simple docstring''' if return_pvalue: SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
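# Sanity check: the metric is a thin wrapper around scipy.stats.pearsonr, so the
# docstring example can be reproduced directly (a sketch, assuming scipy only):
if __name__ == "__main__":
    r, p = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    print(round(float(r), 2), round(float(p), 2))  # -0.74 0.15, matching the examples above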
318
0
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : List[Any] = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = { """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""", } class lowerCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' __UpperCamelCase = "efficientnet" def __init__( self : Tuple , lowercase_ : int = 3 , lowercase_ : int = 600 , lowercase_ : float = 2.0 , lowercase_ : float = 3.1 , lowercase_ : int = 8 , lowercase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowercase_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , lowercase_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , lowercase_ : List[int] = [] , lowercase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowercase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowercase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowercase_ : float = 0.25 , lowercase_ : str = "swish" , lowercase_ : int = 2560 , lowercase_ : str = "mean" , lowercase_ : float = 0.02 , lowercase_ : float = 0.0_01 , lowercase_ : float = 0.99 , lowercase_ : float = 0.5 , lowercase_ : float = 0.2 , **lowercase_ : int , ): '''simple docstring''' super().__init__(**lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = num_channels SCREAMING_SNAKE_CASE_ : List[str] = image_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = width_coefficient SCREAMING_SNAKE_CASE_ : Optional[Any] = depth_coefficient SCREAMING_SNAKE_CASE_ : Union[str, Any] = depth_divisor SCREAMING_SNAKE_CASE_ : Dict = kernel_sizes SCREAMING_SNAKE_CASE_ : Union[str, Any] = in_channels SCREAMING_SNAKE_CASE_ : Dict = out_channels SCREAMING_SNAKE_CASE_ : Dict = depthwise_padding SCREAMING_SNAKE_CASE_ : int = strides SCREAMING_SNAKE_CASE_ : List[str] = num_block_repeats SCREAMING_SNAKE_CASE_ : Optional[Any] = expand_ratios SCREAMING_SNAKE_CASE_ : List[str] = squeeze_expansion_ratio SCREAMING_SNAKE_CASE_ : int = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dim SCREAMING_SNAKE_CASE_ : int = pooling_type SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE_ : Any = batch_norm_eps SCREAMING_SNAKE_CASE_ : List[Any] = batch_norm_momentum SCREAMING_SNAKE_CASE_ : int = dropout_rate SCREAMING_SNAKE_CASE_ : int = drop_connect_rate SCREAMING_SNAKE_CASE_ : List[Any] = sum(lowercase_) * 4 class lowerCAmelCase__ ( __UpperCamelCase ): '''simple docstring''' __UpperCamelCase = version.parse("1.11" ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ]) @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return 1e-5
351
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCAmelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = pad_token_id SCREAMING_SNAKE_CASE_ : Optional[int] = max_length SCREAMING_SNAKE_CASE_ : Dict = vocab SCREAMING_SNAKE_CASE_ : Dict = merges SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab() return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_) return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]): '''simple docstring''' return cls(**lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs( lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id) return {"attention_mask": attention_mask, "input_ids": input_ids}
318
0
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=13 , lowercase_ : Union[str, Any]=[30, 30] , lowercase_ : Union[str, Any]=2 , lowercase_ : Any=3 , lowercase_ : int=True , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=32 , lowercase_ : Any=5 , lowercase_ : Tuple=4 , lowercase_ : List[str]=37 , lowercase_ : List[str]="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=10 , lowercase_ : str=0.02 , lowercase_ : Optional[int]=3 , lowercase_ : Union[str, Any]=None , lowercase_ : List[str]=8 , lowercase_ : str=10 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : Tuple = image_size SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size SCREAMING_SNAKE_CASE_ : int = num_channels SCREAMING_SNAKE_CASE_ : List[Any] = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE_ : Any = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[int] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE_ : int = num_labels SCREAMING_SNAKE_CASE_ : Any = scope SCREAMING_SNAKE_CASE_ : List[str] = n_targets SCREAMING_SNAKE_CASE_ : Tuple = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens SCREAMING_SNAKE_CASE_ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size) SCREAMING_SNAKE_CASE_ : str = num_patches + 1 + self.num_detection_tokens def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] for i in range(self.batch_size): SCREAMING_SNAKE_CASE_ : Any = {} SCREAMING_SNAKE_CASE_ : List[str] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=__SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Tuple = torch.rand(self.n_targets , 4 , device=__SCREAMING_SNAKE_CASE) labels.append(__SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_config() return 
config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = YolosModel(config=__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = YolosForObjectDetection(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(pixel_values=__SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Tuple = model(__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) SCREAMING_SNAKE_CASE_ : int = model(pixel_values=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4)) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = config_and_inputs SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else () __UpperCamelCase = ( {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : str=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE) if return_labels: if model_class.__name__ == "YolosForObjectDetection": SCREAMING_SNAKE_CASE_ : Optional[Any] = [] for i in range(self.model_tester.batch_size): SCREAMING_SNAKE_CASE_ : Optional[int] = {} SCREAMING_SNAKE_CASE_ : Dict = torch.ones( size=(self.model_tester.n_targets,) , device=__SCREAMING_SNAKE_CASE , dtype=torch.long) 
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=__SCREAMING_SNAKE_CASE , dtype=torch.float) labels.append(__SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Optional[int] = labels return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = YolosModelTester(self) SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Tuple = model_class(__SCREAMING_SNAKE_CASE) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) SCREAMING_SNAKE_CASE_ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Tuple = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ : Union[str, Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : Optional[Any] = True # in YOLOS, the seq_len is different SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.expected_seq_len for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Tuple = False SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) SCREAMING_SNAKE_CASE_ : Dict = outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE_ : Union[str, Any] = True SCREAMING_SNAKE_CASE_ : Tuple = model_class(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers) self.assertListEqual( 
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) SCREAMING_SNAKE_CASE_ : List[Any] = len(__SCREAMING_SNAKE_CASE) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Any = model_class(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) SCREAMING_SNAKE_CASE_ : Optional[int] = 1 self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE)) SCREAMING_SNAKE_CASE_ : Tuple = outputs.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' def check_hidden_states_output(lowercase_ : str , lowercase_ : Any , lowercase_ : int): SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE) model.to(__SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.hidden_states SCREAMING_SNAKE_CASE_ : Any = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE) # YOLOS has a different seq_length SCREAMING_SNAKE_CASE_ : int = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : int = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_ : str = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*__SCREAMING_SNAKE_CASE) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Any = YolosModel.from_pretrained(__SCREAMING_SNAKE_CASE) self.assertIsNotNone(__SCREAMING_SNAKE_CASE) def _A () -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''') if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''').to(__SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Dict = 
self.default_image_processor SCREAMING_SNAKE_CASE_ : int = prepare_img() SCREAMING_SNAKE_CASE_ : str = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : int = model(inputs.pixel_values) # verify outputs SCREAMING_SNAKE_CASE_ : Dict = torch.Size((1, 100, 92)) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=__SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] , device=__SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4)) # verify postprocessing SCREAMING_SNAKE_CASE_ : str = image_processor.post_process_object_detection( __SCREAMING_SNAKE_CASE , threshold=0.3 , target_sizes=[image.size[::-1]])[0] SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61]).to(__SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : List[str] = [75, 75, 17, 63, 17] SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95]).to(__SCREAMING_SNAKE_CASE) self.assertEqual(len(results['''scores''']) , 5) self.assertTrue(torch.allclose(results['''scores'''] , __SCREAMING_SNAKE_CASE , atol=1e-4)) self.assertSequenceEqual(results['''labels'''].tolist() , __SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __SCREAMING_SNAKE_CASE))
352
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) SCREAMING_SNAKE_CASE_ : Dict = { '''do_resize''': True, '''size''': {'''height''': 224, '''width''': 224}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], '''do_convert_rgb''': True, } SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str): '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_slow.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_) SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_fast.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , 
tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , lowercase_) self.assertIsInstance(processor_fast.tokenizer , lowercase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , lowercase_) self.assertIsInstance(processor_fast.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''') SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , lowercase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values''']) # test if it raises when no input is passed with pytest.raises(lowercase_): processor() def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
318
0
"""simple docstring""" def _A () -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] SCREAMING_SNAKE_CASE_ : List[Any] = 6 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 SCREAMING_SNAKE_CASE_ : Dict = 19_01 SCREAMING_SNAKE_CASE_ : str = 0 while year < 20_01: day += 7 if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 SCREAMING_SNAKE_CASE_ : Tuple = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 SCREAMING_SNAKE_CASE_ : List[str] = day - 29 else: if day > days_per_month[month - 1]: month += 1 SCREAMING_SNAKE_CASE_ : str = day - days_per_month[month - 2] if month > 12: year += 1 SCREAMING_SNAKE_CASE_ : str = 1 if year < 20_01 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
353
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "rwkv" __UpperCamelCase = {"max_position_embeddings": "context_length"} def __init__( self : Union[str, Any] , lowercase_ : Any=50277 , lowercase_ : str=1024 , lowercase_ : List[str]=4096 , lowercase_ : Optional[Any]=32 , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : List[Any]=1e-5 , lowercase_ : Union[str, Any]=0 , lowercase_ : Union[str, Any]=0 , lowercase_ : int=6 , lowercase_ : Tuple=False , lowercase_ : Any=True , **lowercase_ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE_ : Any = context_length SCREAMING_SNAKE_CASE_ : int = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size SCREAMING_SNAKE_CASE_ : int = intermediate_size if intermediate_size is not None else 4 * hidden_size SCREAMING_SNAKE_CASE_ : int = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : Optional[int] = rescale_every SCREAMING_SNAKE_CASE_ : Dict = use_cache SCREAMING_SNAKE_CASE_ : Dict = bos_token_id SCREAMING_SNAKE_CASE_ : Any = eos_token_id super().__init__( tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
318
0
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowerCAmelCase__ ( enum.Enum ): '''simple docstring''' __UpperCamelCase = 0 __UpperCamelCase = 1 __UpperCamelCase = 2 @add_end_docstrings(UpperCAmelCase__ ) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : int , *lowercase_ : Optional[int] , **lowercase_ : Dict): '''simple docstring''' super().__init__(*_lowerCAmelCase , **_lowerCAmelCase) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. SCREAMING_SNAKE_CASE_ : Any = None if self.model.config.prefix is not None: SCREAMING_SNAKE_CASE_ : Any = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. SCREAMING_SNAKE_CASE_ : Any = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self._sanitize_parameters(prefix=_lowerCAmelCase , **self._forward_params) SCREAMING_SNAKE_CASE_ : Optional[Any] = {**self._preprocess_params, **preprocess_params} SCREAMING_SNAKE_CASE_ : Dict = {**self._forward_params, **forward_params} def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str]=None , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Any=None , lowercase_ : List[str]=None , lowercase_ : Optional[int]=None , **lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = {} if prefix is not None: SCREAMING_SNAKE_CASE_ : Any = prefix if prefix: SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer( _lowerCAmelCase , padding=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=self.framework) SCREAMING_SNAKE_CASE_ : List[str] = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ''' [None, \'hole\']''') SCREAMING_SNAKE_CASE_ : Optional[Any] = handle_long_generation preprocess_params.update(_lowerCAmelCase) SCREAMING_SNAKE_CASE_ : Tuple = generate_kwargs SCREAMING_SNAKE_CASE_ : Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''') if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''') SCREAMING_SNAKE_CASE_ : List[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''') SCREAMING_SNAKE_CASE_ : Optional[int] = ReturnType.TENSORS if return_type is not None: SCREAMING_SNAKE_CASE_ : List[Any] = return_type if clean_up_tokenization_spaces is not None: SCREAMING_SNAKE_CASE_ : str = clean_up_tokenization_spaces if stop_sequence is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase) if len(_lowerCAmelCase) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''') SCREAMING_SNAKE_CASE_ : Tuple = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _SCREAMING_SNAKE_CASE ( self : Any , *lowercase_ : int , **lowercase_ : Dict): '''simple docstring''' if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True}) return super()._parse_and_tokenize(*_lowerCAmelCase , **_lowerCAmelCase) def __call__( self : str , lowercase_ : int , **lowercase_ : Optional[int]): '''simple docstring''' return super().__call__(_lowerCAmelCase , **_lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Any="" , lowercase_ : Optional[Any]=None , **lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.tokenizer( prefix + prompt_text , padding=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=self.framework) SCREAMING_SNAKE_CASE_ : List[Any] = prompt_text if handle_long_generation == "hole": SCREAMING_SNAKE_CASE_ : Tuple = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: SCREAMING_SNAKE_CASE_ : str = generate_kwargs['''max_new_tokens'''] else: SCREAMING_SNAKE_CASE_ : str = generate_kwargs.get('''max_length''' , self.model.config.max_length) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''') if cur_len + new_tokens > self.tokenizer.model_max_length: SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''') SCREAMING_SNAKE_CASE_ : List[str] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: SCREAMING_SNAKE_CASE_ : List[Any] = inputs['''attention_mask'''][:, -keep_length:] return inputs def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[Any] , **lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_inputs['''input_ids'''] SCREAMING_SNAKE_CASE_ : List[str] = model_inputs.get('''attention_mask''' , _lowerCAmelCase) # Allow empty prompts if input_ids.shape[1] == 0: SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : List[Any] = None SCREAMING_SNAKE_CASE_ : str = 1 else: SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.shape[0] SCREAMING_SNAKE_CASE_ : List[Any] = model_inputs.pop('''prompt_text''') # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
SCREAMING_SNAKE_CASE_ : int = generate_kwargs.pop('''prefix_length''' , 0) if prefix_length > 0: SCREAMING_SNAKE_CASE_ : Dict = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: SCREAMING_SNAKE_CASE_ : List[str] = generate_kwargs.get('''max_length''') or self.model.config.max_length generate_kwargs["max_length"] += prefix_length SCREAMING_SNAKE_CASE_ : str = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL SCREAMING_SNAKE_CASE_ : List[Any] = self.model.generate(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase , **_lowerCAmelCase) SCREAMING_SNAKE_CASE_ : List[Any] = generated_sequence.shape[0] if self.framework == "pt": SCREAMING_SNAKE_CASE_ : str = generated_sequence.reshape(_lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:]) elif self.framework == "tf": SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.reshape(_lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[int]=ReturnType.FULL_TEXT , lowercase_ : Any=True): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = model_outputs['''generated_sequence'''][0] SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_outputs['''input_ids'''] SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_outputs['''prompt_text'''] SCREAMING_SNAKE_CASE_ : List[Any] = generated_sequence.numpy().tolist() SCREAMING_SNAKE_CASE_ : Any = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text SCREAMING_SNAKE_CASE_ : Any = self.tokenizer.decode( _lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: SCREAMING_SNAKE_CASE_ : Any = 0 else: SCREAMING_SNAKE_CASE_ : int = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase , )) if return_type == ReturnType.FULL_TEXT: SCREAMING_SNAKE_CASE_ : Any = prompt_text + text[prompt_length:] else: SCREAMING_SNAKE_CASE_ : Any = text[prompt_length:] SCREAMING_SNAKE_CASE_ : int = {'''generated_text''': all_text} records.append(_lowerCAmelCase) return records
354
"""simple docstring""" UpperCAmelCase_ : Optional[int] = 8.3_1_4_4_5_9_8 def _A (__a , __a ) -> float: """simple docstring""" if temperature < 0: raise Exception('''Temperature cannot be less than 0 K''' ) if molar_mass <= 0: raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example UpperCAmelCase_ : str = 300 UpperCAmelCase_ : str = 28 UpperCAmelCase_ : Any = rms_speed_of_molecule(temperature, molar_mass) print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
318
0
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _A (__a , __a , __a , __a , __a=True , __a="pt" ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = {'''add_prefix_space''': True} if isinstance(_A , _A ) and not line.startswith(''' ''' ) else {} SCREAMING_SNAKE_CASE_ : str = padding_side return tokenizer( [line] , max_length=_A , padding='''max_length''' if pad_to_max_length else None , truncation=_A , return_tensors=_A , add_special_tokens=_A , **_A , ) def _A (__a , __a , __a=None , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.ne(_A ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class lowerCAmelCase__ ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Tuple="train" , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : List[str]=None , lowercase_ : List[str]="" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : Tuple = Path(_SCREAMING_SNAKE_CASE).joinpath(type_path + '''.source''') SCREAMING_SNAKE_CASE_ : List[str] = Path(_SCREAMING_SNAKE_CASE).joinpath(type_path + '''.target''') SCREAMING_SNAKE_CASE_ : List[str] = self.get_char_lens(self.src_file) SCREAMING_SNAKE_CASE_ : Dict = max_source_length SCREAMING_SNAKE_CASE_ : str = max_target_length assert min(self.src_lens) > 0, F'found empty line in {self.src_file}' SCREAMING_SNAKE_CASE_ : Tuple = tokenizer SCREAMING_SNAKE_CASE_ : int = prefix if n_obs is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE_ : List[str] = src_lang SCREAMING_SNAKE_CASE_ : Optional[Any] = tgt_lang def __len__( self : Union[str, Any]): '''simple docstring''' return len(self.src_lens) def __getitem__( self : List[str] , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE_ : Dict = self.prefix + linecache.getline(str(self.src_file) , _SCREAMING_SNAKE_CASE).rstrip('''\n''') SCREAMING_SNAKE_CASE_ : str = linecache.getline(str(self.tgt_file) , _SCREAMING_SNAKE_CASE).rstrip('''\n''') assert source_line, F'empty source line for index {index}' assert tgt_line, F'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE_ : List[Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE) else self.tokenizer ) SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE) else self.tokenizer SCREAMING_SNAKE_CASE_ : int = encode_line(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.max_source_length , '''right''') SCREAMING_SNAKE_CASE_ : Any = encode_line(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.max_target_length , '''right''') 
SCREAMING_SNAKE_CASE_ : Optional[Any] = source_inputs['''input_ids'''].squeeze() SCREAMING_SNAKE_CASE_ : Optional[int] = target_inputs['''input_ids'''].squeeze() SCREAMING_SNAKE_CASE_ : Union[str, Any] = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _SCREAMING_SNAKE_CASE ( lowercase_ : List[Any]): '''simple docstring''' return [len(_SCREAMING_SNAKE_CASE) for x in Path(_SCREAMING_SNAKE_CASE).open().readlines()] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = torch.stack([x['''input_ids'''] for x in batch]) SCREAMING_SNAKE_CASE_ : int = torch.stack([x['''attention_mask'''] for x in batch]) SCREAMING_SNAKE_CASE_ : int = torch.stack([x['''decoder_input_ids'''] for x in batch]) SCREAMING_SNAKE_CASE_ : Optional[int] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ : Optional[Any] = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = trim_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = trim_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Optional[int] = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch UpperCAmelCase_ : str = getLogger(__name__) def _A (__a ) -> Union[str, Any]: """simple docstring""" return list(itertools.chain.from_iterable(_A ) ) def _A (__a ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = get_git_info() save_json(_A , os.path.join(_A , '''git_log.json''' ) ) def _A (__a , __a , __a=4 , **__a ) -> List[str]: """simple docstring""" with open(_A , '''w''' ) as f: json.dump(_A , _A , indent=_A , **_A ) def _A (__a ) -> Tuple: """simple docstring""" with open(_A ) as f: return json.load(_A ) def _A () -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = git.Repo(search_parent_directories=_A ) SCREAMING_SNAKE_CASE_ : str = { '''repo_id''': str(_A ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def _A (__a , __a ) -> List: """simple docstring""" return list(map(_A , _A ) ) def _A (__a , __a ) -> Optional[int]: """simple docstring""" with open(_A , '''wb''' ) as f: return pickle.dump(_A , _A ) def _A (__a ) -> Any: """simple docstring""" def remove_articles(__a ): return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , _A ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) ) def _A (__a , __a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = normalize_answer(_A ).split() SCREAMING_SNAKE_CASE_ : Union[str, Any] = normalize_answer(_A ).split() SCREAMING_SNAKE_CASE_ : Tuple = Counter(_A ) & Counter(_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE_ : str = 1.0 * num_same / len(_A ) 
SCREAMING_SNAKE_CASE_ : Optional[int] = 1.0 * num_same / len(_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = (2 * precision * recall) / (precision + recall) return fa def _A (__a , __a ) -> Union[str, Any]: """simple docstring""" return normalize_answer(_A ) == normalize_answer(_A ) def _A (__a , __a ) -> Dict: """simple docstring""" assert len(_A ) == len(_A ) SCREAMING_SNAKE_CASE_ : str = 0 for hypo, pred in zip(_A , _A ): em += exact_match_score(_A , _A ) if len(_A ) > 0: em /= len(_A ) return {"em": em} def _A (__a ) -> List[Any]: """simple docstring""" return model_prefix.startswith('''rag''' ) def _A (__a , __a , __a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE_ : List[str] = '''dropout_rate''' for p in extra_params: if getattr(_A , _A , _A ): if not hasattr(_A , _A ) and not hasattr(_A , equivalent_param[p] ): logger.info('''config doesn\'t have a `{}` attribute'''.format(_A ) ) delattr(_A , _A ) continue SCREAMING_SNAKE_CASE_ : List[str] = p if hasattr(_A , _A ) else equivalent_param[p] setattr(_A , _A , getattr(_A , _A ) ) delattr(_A , _A ) return hparams, config
355
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = dataset SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies def __iter__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = [] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip()) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = start_length SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) SCREAMING_SNAKE_CASE_ : Tuple = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(lowercase_) def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a ) # last string should be "" return "".join(string_list[:-2] ) def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__a ) ): with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1] SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a ) # each task is generated batch_size times SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes( __a , dim=1 , pad_index=tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy() SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy() for 
task, generated_tokens in zip(__a , __a ): gen_token_dict[task].append(__a ) SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) code_gens[task].append(remove_last_block(__a ) ) return code_gens def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a ) SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing SCREAMING_SNAKE_CASE_ : str = '''false''' if args.num_workers is None: SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate SCREAMING_SNAKE_CASE_ : Tuple = Accelerator() set_seed(args.seed , device_specific=__a ) # Load model and tokenizer SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings SCREAMING_SNAKE_CASE_ : List[str] = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ), } # Load evaluation dataset and metric SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' ) SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' ) SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a ) # do not confuse args.batch_size, which is actually the num_return_sequences SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. 
Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a ) SCREAMING_SNAKE_CASE_ : List[Any] = complete_code( __a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , ) if accelerator.is_main_process: SCREAMING_SNAKE_CASE_ : int = [] for task in tqdm(range(__a ) ): SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test'''] SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute( references=__a , predictions=__a , num_workers=args.num_workers ) print(f'Results: {pass_at_k}' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(__a , __a ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
318
0
"""simple docstring""" class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = {} def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' print(self.vertex) for i in self.vertex: print(lowercase_ , ''' -> ''' , ''' -> '''.join([str(lowercase_) for j in self.vertex[i]])) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : int , lowercase_ : int): '''simple docstring''' if from_vertex in self.vertex: self.vertex[from_vertex].append(lowercase_) else: # else make a new vertex SCREAMING_SNAKE_CASE_ : List[str] = [to_vertex] def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [False] * len(self.vertex) # call the recursive helper function for i in range(len(self.vertex)): if not visited[i]: self.dfs_recursive(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : int , lowercase_ : list): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = True print(lowercase_ , end=''' ''') # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(lowercase_ , lowercase_) if __name__ == "__main__": UpperCAmelCase_ : Optional[Any] = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("""DFS:""") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
356
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["image_processor", "feature_extractor"] __UpperCamelCase = "TvltImageProcessor" __UpperCamelCase = "TvltFeatureExtractor" def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' super().__init__(image_processor=lowercase_ , feature_extractor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_extractor def __call__( self : Any , lowercase_ : str=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : str=None , lowercase_ : int=False , lowercase_ : Union[str, Any]=False , *lowercase_ : List[Any] , **lowercase_ : List[str] , ): '''simple docstring''' if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''') SCREAMING_SNAKE_CASE_ : Any = None if images is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor(lowercase_ , mask_pixel=lowercase_ , *lowercase_ , **lowercase_) if images_mixed is not None: SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor(lowercase_ , is_mixed=lowercase_ , *lowercase_ , **lowercase_) if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor( lowercase_ , *lowercase_ , sampling_rate=lowercase_ , mask_audio=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {} if audio is not None: output_dict.update(lowercase_) if images is not None: output_dict.update(lowercase_) if images_mixed_dict is not None: output_dict.update(lowercase_) return output_dict @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
318
0
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def _A (__a , __a , __a = 10**-10 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = a while True: SCREAMING_SNAKE_CASE_ : Tuple = Decimal(__a ) - ( Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__a ) ) < precision: # noqa: S307 return float(__a ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''') # Find Square Root of 5 print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''') # Exponential Roots print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
357
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "SpeechT5FeatureExtractor" __UpperCamelCase = "SpeechT5Tokenizer" def __init__( self : Any , lowercase_ : Dict , lowercase_ : Optional[Any]): '''simple docstring''' super().__init__(lowercase_ , lowercase_) def __call__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('''audio''' , lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('''text''' , lowercase_) SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('''text_target''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''audio_target''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''sampling_rate''' , lowercase_) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''') if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''') if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''') if audio is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) elif text is not None: SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(lowercase_ , **lowercase_) else: SCREAMING_SNAKE_CASE_ : Any = None if audio_target is not None: SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = targets['''input_values'''] elif text_target is not None: SCREAMING_SNAKE_CASE_ : int = self.tokenizer(lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = targets['''input_ids'''] else: SCREAMING_SNAKE_CASE_ : int = None if inputs is None: return targets if targets is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels SCREAMING_SNAKE_CASE_ : Optional[Any] = targets.get('''attention_mask''') if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE_ : Any = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''input_values''' , lowercase_) SCREAMING_SNAKE_CASE_ : int = kwargs.pop('''input_ids''' , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = kwargs.pop('''labels''' , lowercase_) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''') if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''') if input_values is not None: SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_) elif input_ids is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer.pad(lowercase_ , **lowercase_) else: SCREAMING_SNAKE_CASE_ : List[Any] = None if labels is not None: if "input_ids" in labels or (isinstance(lowercase_ , lowercase_) and "input_ids" in labels[0]): SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer.pad(lowercase_ , **lowercase_) 
SCREAMING_SNAKE_CASE_ : Dict = targets['''input_ids'''] else: SCREAMING_SNAKE_CASE_ : Dict = self.feature_extractor.feature_size SCREAMING_SNAKE_CASE_ : Optional[int] = self.feature_extractor.num_mel_bins SCREAMING_SNAKE_CASE_ : str = self.feature_extractor.pad(lowercase_ , *lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : str = feature_size_hack SCREAMING_SNAKE_CASE_ : Dict = targets['''input_values'''] else: SCREAMING_SNAKE_CASE_ : List[Any] = None if inputs is None: return targets if targets is not None: SCREAMING_SNAKE_CASE_ : Dict = labels SCREAMING_SNAKE_CASE_ : List[str] = targets.get('''attention_mask''') if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Tuple): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowercase_ : Dict , **lowercase_ : List[Any]): '''simple docstring''' return self.tokenizer.decode(*lowercase_ , **lowercase_)
318
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCAmelCase__ ( lowerCamelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple): '''simple docstring''' super().__init__() self.register_modules(unet=lowercase__ , scheduler=lowercase__) @torch.no_grad() def __call__( self : Any , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ): '''simple docstring''' if audio_length_in_s is None: SCREAMING_SNAKE_CASE_ : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate SCREAMING_SNAKE_CASE_ : Tuple = audio_length_in_s * self.unet.config.sample_rate SCREAMING_SNAKE_CASE_ : int = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( F'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to' F' {3 * down_scale_factor / self.unet.config.sample_rate}.') SCREAMING_SNAKE_CASE_ : str = int(lowercase__) if sample_size % down_scale_factor != 0: SCREAMING_SNAKE_CASE_ : Dict = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled' F' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising' ''' process.''') SCREAMING_SNAKE_CASE_ : Any = int(lowercase__) SCREAMING_SNAKE_CASE_ : List[Any] = next(iter(self.unet.parameters())).dtype SCREAMING_SNAKE_CASE_ : str = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(lowercase__ , lowercase__) and len(lowercase__) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(lowercase__)}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.') SCREAMING_SNAKE_CASE_ : List[str] = randn_tensor(lowercase__ , generator=lowercase__ , device=self.device , dtype=lowercase__) # set step values self.scheduler.set_timesteps(lowercase__ , device=audio.device) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.scheduler.timesteps.to(lowercase__) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output SCREAMING_SNAKE_CASE_ : Tuple = self.unet(lowercase__ , lowercase__).sample # 2. compute previous image: x_t -> t_t-1 SCREAMING_SNAKE_CASE_ : List[Any] = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample SCREAMING_SNAKE_CASE_ : Dict = audio.clamp(-1 , 1).float().cpu().numpy() SCREAMING_SNAKE_CASE_ : Optional[int] = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=lowercase__)
358
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _A (__a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = np.inf def set_batch_size(__a ) -> None: nonlocal batch_size if isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__a , __a ): SCREAMING_SNAKE_CASE_ : int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__a , __a ) and feature.dtype == "binary": SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__a , __a ) return None if batch_size is np.inf else batch_size class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Any , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[int] , ): '''simple docstring''' super().__init__( lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Any = path_or_paths if isinstance(lowercase_ , lowercase_) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE_ : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = Parquet( cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' if self.streaming: SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE_ : Optional[Any] = None SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset( split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory) return dataset class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = dataset SCREAMING_SNAKE_CASE_ : Dict = path_or_buf SCREAMING_SNAKE_CASE_ : List[Any] = batch_size or get_writer_batch_size(dataset.features) SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , '''wb+''') as buffer: SCREAMING_SNAKE_CASE_ : 
Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs) else: SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs) return written def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_) for offset in logging.tqdm( range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): SCREAMING_SNAKE_CASE_ : List[Any] = query_table( table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowercase_) written += batch.nbytes writer.close() return written
318
0
"""simple docstring""" from __future__ import annotations def _A (__a ) -> None: """simple docstring""" create_state_space_tree(_UpperCAmelCase , [] , 0 , [0 for i in range(len(_UpperCAmelCase ) )] ) def _A (__a , __a , __a , __a , ) -> None: """simple docstring""" if index == len(_UpperCAmelCase ): print(_UpperCAmelCase ) return for i in range(len(_UpperCAmelCase ) ): if not index_used[i]: current_sequence.append(sequence[i] ) SCREAMING_SNAKE_CASE_ : int = True create_state_space_tree(_UpperCAmelCase , _UpperCAmelCase , index + 1 , _UpperCAmelCase ) current_sequence.pop() SCREAMING_SNAKE_CASE_ : Any = False UpperCAmelCase_ : list[int | str] = [3, 1, 2, 4] generate_all_permutations(sequence) UpperCAmelCase_ : list[int | str] = ["A", "B", "C"] generate_all_permutations(sequence_a)
359
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""] def _A (__a ) -> Dict: """simple docstring""" if "emb" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def _A (__a , __a ) -> Tuple[Dict, Dict]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() ) SCREAMING_SNAKE_CASE_ : int = {} for key in keys: SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a ) SCREAMING_SNAKE_CASE_ : int = rename_keys(__a ) if "in_proj_weight" in key: # split fused qkv proj SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :] SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :] SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: SCREAMING_SNAKE_CASE_ : int = val else: SCREAMING_SNAKE_CASE_ : Any = val return state_dict, enc_dec_proj_state_dict def _A (__a ) -> MusicgenDecoderConfig: """simple docstring""" if checkpoint == "small": # default config values SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24 SCREAMING_SNAKE_CASE_ : Tuple = 24 SCREAMING_SNAKE_CASE_ : Optional[Any] = 16 elif checkpoint == "medium": SCREAMING_SNAKE_CASE_ : List[str] = 15_36 SCREAMING_SNAKE_CASE_ : Optional[int] = 48 SCREAMING_SNAKE_CASE_ : Optional[int] = 24 elif checkpoint == "large": SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48 SCREAMING_SNAKE_CASE_ : Optional[int] = 48 SCREAMING_SNAKE_CASE_ : int = 32 else: raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' 
) SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig( hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , ) return config @torch.no_grad() def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a ) SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict( __a , hidden_size=decoder_config.hidden_size ) SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel.from_pretrained('''t5-base''' ) SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__a ) if len(__a ) > 0: raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' ) if len(__a ) > 0: raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' ) # init the composite model SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__a ) # check we can do a forward pass SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits if logits.shape != (8, 1, 20_48): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' ) SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a ) # set the appropriate bos/pad token ids SCREAMING_SNAKE_CASE_ : str = 20_48 SCREAMING_SNAKE_CASE_ : List[Any] = 20_48 # set other default generation config params SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate ) SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0 if pytorch_dump_folder is not None: Path(__a ).mkdir(exist_ok=__a ) logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' ) model.save_pretrained(__a ) processor.save_pretrained(__a ) if repo_id: logger.info(f'Pushing model {checkpoint} to {repo_id}' ) model.push_to_hub(__a ) processor.push_to_hub(__a ) if __name__ == "__main__": UpperCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) UpperCAmelCase_ : Dict = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
318
0
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
360
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def _A (__a ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b def _A (__a ) -> np.ndarray: """simple docstring""" return (gray > 1_27) & (gray <= 2_55) def _A (__a , __a ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros_like(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE_ : Union[str, Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE_ : Optional[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE_ : Any = int(summation > 0 ) return output if __name__ == "__main__": # read original image UpperCAmelCase_ : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" UpperCAmelCase_ : List[Any] = np.array(Image.open(lena_path)) # kernel to be applied UpperCAmelCase_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) UpperCAmelCase_ : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image UpperCAmelCase_ : List[str] = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
318
0
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int]=2 , lowercase_ : int=8 , lowercase_ : Tuple=True , lowercase_ : Any=True , lowercase_ : Optional[Any]=True , lowercase_ : Dict=True , lowercase_ : Union[str, Any]=99 , lowercase_ : Union[str, Any]=16 , lowercase_ : Dict=5 , lowercase_ : int=2 , lowercase_ : Union[str, Any]=36 , lowercase_ : str="gelu" , lowercase_ : List[str]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Any=512 , lowercase_ : Any=16 , lowercase_ : Optional[Any]=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Tuple=3 , lowercase_ : List[str]=4 , lowercase_ : Optional[int]=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = parent SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE_ : Optional[int] = seq_length SCREAMING_SNAKE_CASE_ : int = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE_ : int = use_token_type_ids SCREAMING_SNAKE_CASE_ : Dict = use_labels SCREAMING_SNAKE_CASE_ : Dict = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : Any = num_attention_heads SCREAMING_SNAKE_CASE_ : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size SCREAMING_SNAKE_CASE_ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE_ : int = num_labels SCREAMING_SNAKE_CASE_ : Any = num_choices SCREAMING_SNAKE_CASE_ : Dict = scope def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_ : List[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : Dict = None SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple 
docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_config() SCREAMING_SNAKE_CASE_ : Optional[Any] = 300 return config def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : Dict = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = MraModel(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ : List[str] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__) SCREAMING_SNAKE_CASE_ : Tuple = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__) SCREAMING_SNAKE_CASE_ : Tuple = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = True SCREAMING_SNAKE_CASE_ : Any = MraModel(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) SCREAMING_SNAKE_CASE_ : int = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) SCREAMING_SNAKE_CASE_ : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = MraForMaskedLM(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ : Any = model(UpperCAmelCase__ , 
attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = MraForQuestionAnswering(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : str = MraForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ : str = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = MraForTokenClassification(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_choices SCREAMING_SNAKE_CASE_ : Dict = MraForMultipleChoice(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_ : int = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_ : str = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , 
( SCREAMING_SNAKE_CASE_ ) , ) : Any = config_and_inputs SCREAMING_SNAKE_CASE_ : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = () def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = MraModelTester(self) SCREAMING_SNAKE_CASE_ : str = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : str = type self.model_tester.create_and_check_model(*UpperCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[int] = MraModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) @unittest.skip(reason='''MRA does not output attentions''') def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' return @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = MraModel.from_pretrained('''uw-madison/mra-base-512-4''') SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.arange(256).unsqueeze(0) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : int = model(UpperCAmelCase__)[0] SCREAMING_SNAKE_CASE_ : int = torch.Size((1, 256, 768)) self.assertEqual(output.shape , UpperCAmelCase__) 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor( [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''') SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.arange(256).unsqueeze(0) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = model(UpperCAmelCase__)[0] SCREAMING_SNAKE_CASE_ : Any = 50265 SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((1, 256, vocab_size)) self.assertEqual(output.shape , UpperCAmelCase__) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor( [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''') SCREAMING_SNAKE_CASE_ : Dict = torch.arange(4096).unsqueeze(0) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = model(UpperCAmelCase__)[0] SCREAMING_SNAKE_CASE_ : Union[str, Any] = 50265 SCREAMING_SNAKE_CASE_ : Dict = torch.Size((1, 4096, vocab_size)) self.assertEqual(output.shape , UpperCAmelCase__) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor( [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
361
"""simple docstring""" from collections import defaultdict def _A (__a , __a ) -> bool: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = first_str.lower().strip() SCREAMING_SNAKE_CASE_ : List[Any] = second_str.lower().strip() # Remove whitespace SCREAMING_SNAKE_CASE_ : Dict = first_str.replace(''' ''' , '''''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = second_str.replace(''' ''' , '''''' ) # Strings of different lengths are not anagrams if len(__a ) != len(__a ): return False # Default values for count should be 0 SCREAMING_SNAKE_CASE_ : defaultdict[str, int] = defaultdict(__a ) # For each character in input strings, # increment count in the corresponding for i in range(len(__a ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ : Any = input("""Enter the first string """).strip() UpperCAmelCase_ : Optional[int] = input("""Enter the second string """).strip() UpperCAmelCase_ : Union[str, Any] = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
318
0
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class lowerCAmelCase__ ( UpperCamelCase_ ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , **lowercase_ : Tuple): '''simple docstring''' if tokenize_kwargs is None: SCREAMING_SNAKE_CASE_ : Any = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''') SCREAMING_SNAKE_CASE_ : str = truncation SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenize_kwargs SCREAMING_SNAKE_CASE_ : int = {} if return_tensors is not None: SCREAMING_SNAKE_CASE_ : Any = return_tensors return preprocess_params, {}, postprocess_params def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Optional[int] , **lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.framework SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer(_a , return_tensors=_a , **_a) return model_inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.model(**_a) return model_outputs def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Any , lowercase_ : Optional[int]=False): '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : Tuple): '''simple docstring''' return super().__call__(*_a , **_a)
362
"""simple docstring""" import argparse from collections import defaultdict import yaml UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml""" def _A (__a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = defaultdict(__a ) for doc in model_doc: counts[doc["local"]] += 1 SCREAMING_SNAKE_CASE_ : List[Any] = [key for key, value in counts.items() if value > 1] SCREAMING_SNAKE_CASE_ : int = [] for duplicate_key in duplicates: SCREAMING_SNAKE_CASE_ : List[str] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(__a ) > 1: raise ValueError( f'{duplicate_key} is present several times in the documentation table of content at ' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(__a , key=lambda __a : s["title"].lower() ) def _A (__a=False ) -> Tuple: """simple docstring""" with open(__a , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = yaml.safe_load(f.read() ) # Get to the API doc SCREAMING_SNAKE_CASE_ : Any = 0 while content[api_idx]["title"] != "API": api_idx += 1 SCREAMING_SNAKE_CASE_ : str = content[api_idx]['''sections'''] # Then to the model doc SCREAMING_SNAKE_CASE_ : List[Any] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 SCREAMING_SNAKE_CASE_ : Optional[int] = api_doc[model_idx]['''sections'''] SCREAMING_SNAKE_CASE_ : str = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section] SCREAMING_SNAKE_CASE_ : Optional[Any] = False for idx, modality_doc in modalities_docs: SCREAMING_SNAKE_CASE_ : List[str] = modality_doc['''sections'''] SCREAMING_SNAKE_CASE_ : Union[str, Any] = clean_model_doc_toc(__a ) if old_modality_doc != new_modality_doc: SCREAMING_SNAKE_CASE_ : str = True if overwrite: SCREAMING_SNAKE_CASE_ : Optional[int] = new_modality_doc if diff: if overwrite: SCREAMING_SNAKE_CASE_ : List[Any] = model_doc SCREAMING_SNAKE_CASE_ : int = api_doc with open(__a , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Tuple = parser.parse_args() check_model_doc(args.fix_and_overwrite)
318
0
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _A () -> List[Any]: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(lowercase__ ): requests.request('''GET''' , '''https://huggingface.co''' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 ) @pytest.mark.integration def _A () -> List[Any]: """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('''GET''' , '''https://huggingface.co''' ) def _A () -> str: """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(lowercase__ ): http_head('''https://huggingface.co''' )
363
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
318
0
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _A (__a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(args.tf_model_dir , '''parameters.json''' ) SCREAMING_SNAKE_CASE_ : Tuple = json.loads(open(__snake_case ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): SCREAMING_SNAKE_CASE_ : Dict = args.output + ".pt" SCREAMING_SNAKE_CASE_ : str = OrderedDict() with tf.device('''/CPU:0''' ): SCREAMING_SNAKE_CASE_ : int = tf.train.load_checkpoint(args.tf_model_dir ) SCREAMING_SNAKE_CASE_ : Dict = reader.get_variable_to_shape_map() for key_name in shapes.keys(): SCREAMING_SNAKE_CASE_ : Any = reader.get_tensor(__snake_case ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): SCREAMING_SNAKE_CASE_ : Tuple = 8 SCREAMING_SNAKE_CASE_ : Optional[int] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time SCREAMING_SNAKE_CASE_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__snake_case ) elif key_name.startswith('''model/moe''' ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): SCREAMING_SNAKE_CASE_ : Tuple = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player SCREAMING_SNAKE_CASE_ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(__snake_case ) elif key_name.endswith('''/softmlp/kernel''' ): SCREAMING_SNAKE_CASE_ : Optional[int] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player SCREAMING_SNAKE_CASE_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : str = torch.tensor(__snake_case ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): SCREAMING_SNAKE_CASE_ : Tuple = key_name[-9:-7] for i in range(16 ): SCREAMING_SNAKE_CASE_ : Optional[int] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer) SCREAMING_SNAKE_CASE_ : List[Any] = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(__snake_case ) elif key_name.startswith('''model/mlp''' ): SCREAMING_SNAKE_CASE_ : Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): SCREAMING_SNAKE_CASE_ : Any = "model.blocks.%d.feed_forward.mlp.wi.weight" % player SCREAMING_SNAKE_CASE_ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(__snake_case ) elif key_name.endswith('''/p1/bias''' ): SCREAMING_SNAKE_CASE_ : Tuple = "model.blocks.%d.feed_forward.mlp.wi.bias" % player SCREAMING_SNAKE_CASE_ : Optional[Any] = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(__snake_case ) elif key_name.endswith('''/p2/kernel''' ): SCREAMING_SNAKE_CASE_ : List[Any] = 
"model.blocks.%d.feed_forward.mlp.wo.weight" % player SCREAMING_SNAKE_CASE_ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__snake_case ) elif key_name.endswith('''/p2/bias''' ): SCREAMING_SNAKE_CASE_ : List[Any] = "model.blocks.%d.feed_forward.mlp.wo.bias" % player SCREAMING_SNAKE_CASE_ : Union[str, Any] = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__snake_case ) elif key_name.startswith('''model/ln''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): SCREAMING_SNAKE_CASE_ : List[str] = "model.blocks.%d.feed_forward.norm.bias" % player SCREAMING_SNAKE_CASE_ : List[str] = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__snake_case ) elif key_name.endswith('''/g''' ): SCREAMING_SNAKE_CASE_ : Optional[int] = "model.blocks.%d.feed_forward.norm.weight" % player SCREAMING_SNAKE_CASE_ : Optional[int] = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(__snake_case ) elif key_name.startswith('''model/att''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): SCREAMING_SNAKE_CASE_ : int = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum SCREAMING_SNAKE_CASE_ : str = state[:, 0, :, :] SCREAMING_SNAKE_CASE_ : Union[str, Any] = state[:, 1, :, :] SCREAMING_SNAKE_CASE_ : List[Any] = state[:, 2, :, :] SCREAMING_SNAKE_CASE_ : List[Any] = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : str = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : int = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : Dict = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__snake_case ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player SCREAMING_SNAKE_CASE_ : str = torch.tensor(__snake_case ) SCREAMING_SNAKE_CASE_ : int = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(__snake_case ) elif key_name.endswith('''/o/kernel''' ): SCREAMING_SNAKE_CASE_ : Tuple = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player SCREAMING_SNAKE_CASE_ : List[str] = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : str = torch.tensor(__snake_case ) elif key_name.startswith('''model/an''' ): SCREAMING_SNAKE_CASE_ : Dict = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): SCREAMING_SNAKE_CASE_ : str = "model.blocks.%d.self_attn.norm.bias" % player SCREAMING_SNAKE_CASE_ : str = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor(__snake_case ) elif key_name.endswith('''/g''' ): SCREAMING_SNAKE_CASE_ : str = "model.blocks.%d.self_attn.norm.weight" % player SCREAMING_SNAKE_CASE_ : Dict = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ : List[Any] = 
torch.tensor(__snake_case ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): SCREAMING_SNAKE_CASE_ : Optional[Any] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[ key_name[-3:] ] SCREAMING_SNAKE_CASE_ : Dict = "model.%s.weight" % nlayer SCREAMING_SNAKE_CASE_ : List[Any] = vnp.copy() # same in embedded SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__snake_case ) if key_name.startswith('''model/wte''' ): SCREAMING_SNAKE_CASE_ : Any = "lm_head.weight" SCREAMING_SNAKE_CASE_ : Optional[int] = vnp.copy() # same in embedded SCREAMING_SNAKE_CASE_ : str = torch.tensor(__snake_case ) elif key_name.startswith('''model/wob''' ): SCREAMING_SNAKE_CASE_ : Any = "final_logits_bias" SCREAMING_SNAKE_CASE_ : Union[str, Any] = vnp.copy() # same in embedded SCREAMING_SNAKE_CASE_ : Dict = state.reshape((1, -1) ) SCREAMING_SNAKE_CASE_ : int = torch.tensor(__snake_case ) elif key_name == "model/dense/kernel": SCREAMING_SNAKE_CASE_ : Optional[int] = "model.last_project.weight" SCREAMING_SNAKE_CASE_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(__snake_case ) elif key_name == "model/dense_1/bias": SCREAMING_SNAKE_CASE_ : Optional[int] = "model.last_project.bias" SCREAMING_SNAKE_CASE_ : Tuple = vnp.copy() # same because it is one dimensional SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(__snake_case ) torch.save(__snake_case , args.output ) if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser( description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""") parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""") UpperCAmelCase_ : Optional[Any] = parser.parse_args() convert_tf_gptsan_to_pt(args)
364
"""simple docstring""" from __future__ import annotations UpperCAmelCase_ : List[str] = list[list[int]] # assigning initial values to the grid UpperCAmelCase_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution UpperCAmelCase_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _A (__a , __a , __a , __a ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _A (__a ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _A (__a ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__a ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(__a , __a , __a , __a ): SCREAMING_SNAKE_CASE_ : Tuple = digit if sudoku(__a ) is not None: return grid SCREAMING_SNAKE_CASE_ : Any = 0 return None def _A (__a ) -> None: """simple docstring""" for row in grid: for cell in row: print(__a , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") UpperCAmelCase_ : str = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
318
0
"""simple docstring""" from typing import Any import numpy as np def _A (__a ) -> Optional[Any]: """simple docstring""" return np.array_equal(_a , matrix.conjugate().T ) def _A (__a , __a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = v.conjugate().T SCREAMING_SNAKE_CASE_ : Optional[Any] = v_star.dot(_a ) assert isinstance(_a , np.ndarray ) return (v_star_dot.dot(_a )) / (v_star.dot(_a )) def _A () -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] ) SCREAMING_SNAKE_CASE_ : List[Any] = np.array([[1], [2], [3]] ) assert is_hermitian(_a ), f'{a} is not hermitian.' print(rayleigh_quotient(_a , _a ) ) SCREAMING_SNAKE_CASE_ : str = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(_a ), f'{a} is not hermitian.' assert rayleigh_quotient(_a , _a ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
365
"""simple docstring""" from itertools import permutations def _A (__a ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False SCREAMING_SNAKE_CASE_ : List[str] = [7, 11, 13, 17] for i, test in enumerate(__a ): if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def _A (__a = 10 ) -> int: """simple docstring""" return sum( int(''''''.join(map(__a , __a ) ) ) for num in permutations(range(__a ) ) if is_substring_divisible(__a ) ) if __name__ == "__main__": print(f'''{solution() = }''')
318
0
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = {"""vocab_file""": """vocab.txt"""} UpperCAmelCase_ : Any = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } UpperCAmelCase_ : List[Any] = { """openbmb/cpm-ant-10b""": 1024, } def _A (__a ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = collections.OrderedDict() with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' ) as reader: SCREAMING_SNAKE_CASE_ : int = reader.readlines() for index, token in enumerate(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ : Optional[int] = token.rstrip('''\n''' ) SCREAMING_SNAKE_CASE_ : str = index return vocab class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Tuple="<unk>" , lowercase_ : Any=200): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = vocab SCREAMING_SNAKE_CASE_ : Any = unk_token SCREAMING_SNAKE_CASE_ : Optional[Any] = max_input_chars_per_word def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = list(_lowercase) if len(_lowercase) > self.max_input_chars_per_word: return [self.unk_token] SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : Optional[Any] = [] while start < len(_lowercase): SCREAMING_SNAKE_CASE_ : List[str] = len(_lowercase) SCREAMING_SNAKE_CASE_ : List[str] = None while start < end: SCREAMING_SNAKE_CASE_ : str = ''''''.join(chars[start:end]) if substr in self.vocab: SCREAMING_SNAKE_CASE_ : Union[str, Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(_lowercase) SCREAMING_SNAKE_CASE_ : List[Any] = end return sub_tokens class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["""input_ids""", """attention_mask"""] __UpperCamelCase = False def __init__( self : List[str] , lowercase_ : List[str] , lowercase_ : Any="<d>" , lowercase_ : Optional[Any]="</d>" , lowercase_ : Optional[int]="<s>" , lowercase_ : Union[str, Any]="</s>" , lowercase_ : str="<pad>" , lowercase_ : List[Any]="<unk>" , lowercase_ : Optional[int]="</n>" , lowercase_ : Tuple="</_>" , lowercase_ : List[Any]="left" , **lowercase_ : Union[str, Any] , ): '''simple docstring''' requires_backends(self , ['''jieba''']) super().__init__( bod_token=_lowercase , eod_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , pad_token=_lowercase , unk_token=_lowercase , line_token=_lowercase , space_token=_lowercase , padding_side=_lowercase , **_lowercase , ) SCREAMING_SNAKE_CASE_ : Tuple = bod_token SCREAMING_SNAKE_CASE_ : Dict = eod_token SCREAMING_SNAKE_CASE_ : Optional[Any] = load_vocab(_lowercase) SCREAMING_SNAKE_CASE_ : List[str] = self.encoder[space_token] SCREAMING_SNAKE_CASE_ : str = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] SCREAMING_SNAKE_CASE_ : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase_: x[1])) 
SCREAMING_SNAKE_CASE_ : List[str] = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ : str = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token) @property def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' return self.encoder[self.bod_token] @property def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return self.encoder[self.eod_token] @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' return self.encoder["\n"] @property def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return len(self.encoder) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [] for x in jieba.cut(_lowercase , cut_all=_lowercase): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_lowercase)) return output_tokens def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , **lowercase_ : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = [i for i in token_ids if i >= 0] SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_lowercase , **_lowercase) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Dict): '''simple docstring''' return token in self.encoder def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]): '''simple docstring''' return "".join(_lowercase) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Union[str, Any]): '''simple docstring''' return self.encoder.get(_lowercase , self.encoder.get(self.unk_token)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Dict): '''simple docstring''' return self.decoder.get(_lowercase , self.unk_token) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Optional[int] , lowercase_ : int = None): '''simple docstring''' if os.path.isdir(_lowercase): SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join( _lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) else: SCREAMING_SNAKE_CASE_ : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory SCREAMING_SNAKE_CASE_ : str = 0 if " " in self.encoder: SCREAMING_SNAKE_CASE_ : Optional[Any] = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: SCREAMING_SNAKE_CASE_ : Tuple = self.encoder['''\n'''] del self.encoder["\n"] SCREAMING_SNAKE_CASE_ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase_: x[1])) with open(_lowercase , '''w''' , encoding='''utf-8''') as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' 
''' Please check that the vocabulary is not corrupted!''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index writer.write(token + '''\n''') index += 1 return (vocab_file,) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : int = None): '''simple docstring''' if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Dict = None , lowercase_ : int = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase) if token_ids_a is not None: return [1] + ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) return [1] + ([0] * len(_lowercase))
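The tokenize loop in the WordpieceTokenizer above is a greedy longest-match-first scan. A toy re-implementation with a made-up vocabulary (not CPMAnt's actual vocab) shows the idea:
vocab = {"play", "ing"}
chars, start, pieces = "playing", 0, []
while start < len(chars):
    end = len(chars)
    while end > start and chars[start:end] not in vocab:
        end -= 1  # shrink the window until the longest in-vocab prefix is found
    if end == start:
        pieces.append("<unk>")  # no match at all: emit the unknown token, advance one char
        start += 1
    else:
        pieces.append(chars[start:end])
        start = end
print(pieces)  # ['play', 'ing']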
366
"""simple docstring""" UpperCAmelCase_ : List[Any] = 9.8_0_6_6_5 def _A (__a , __a , __a = g ) -> float: """simple docstring""" if fluid_density <= 0: raise ValueError('''Impossible fluid density''' ) if volume < 0: raise ValueError('''Impossible Object volume''' ) if gravity <= 0: raise ValueError('''Impossible Gravity''' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
318
0
"""simple docstring""" def _A () -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = [] SCREAMING_SNAKE_CASE_ : Tuple = 1 while len(SCREAMING_SNAKE_CASE__ ) < 1e6: constant.append(str(SCREAMING_SNAKE_CASE__ ) ) i += 1 SCREAMING_SNAKE_CASE_ : List[Any] = ''''''.join(SCREAMING_SNAKE_CASE__ ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[9_99] ) * int(constant[99_99] ) * int(constant[9_99_99] ) * int(constant[99_99_99] ) ) if __name__ == "__main__": print(solution())
367
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _A (__a ) -> Union[str, Any]: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__a ) def _A (__a ) -> Any: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__a , id=__a )
318
0
"""simple docstring""" import tensorflow as tf from ...tf_utils import shape_list class lowerCAmelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]=1 , lowercase_ : Tuple=False , **lowercase_ : Dict): '''simple docstring''' super().__init__(**__lowerCamelCase) SCREAMING_SNAKE_CASE_ : Tuple = vocab_size SCREAMING_SNAKE_CASE_ : Tuple = d_embed SCREAMING_SNAKE_CASE_ : Any = d_proj SCREAMING_SNAKE_CASE_ : Optional[int] = cutoffs + [vocab_size] SCREAMING_SNAKE_CASE_ : Optional[int] = [0] + self.cutoffs SCREAMING_SNAKE_CASE_ : Union[str, Any] = div_val SCREAMING_SNAKE_CASE_ : List[str] = self.cutoffs[0] SCREAMING_SNAKE_CASE_ : Tuple = len(self.cutoffs) - 1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.shortlist_size + self.n_clusters SCREAMING_SNAKE_CASE_ : int = keep_order SCREAMING_SNAKE_CASE_ : Dict = [] SCREAMING_SNAKE_CASE_ : Dict = [] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Any): '''simple docstring''' if self.n_clusters > 0: SCREAMING_SNAKE_CASE_ : Dict = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__lowerCamelCase , name='''cluster_weight''') SCREAMING_SNAKE_CASE_ : Dict = self.add_weight( shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__lowerCamelCase , name='''cluster_bias''') if self.div_val == 1: for i in range(len(self.cutoffs)): if self.d_proj != self.d_embed: SCREAMING_SNAKE_CASE_ : Dict = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__lowerCamelCase , name=F'out_projs_._{i}' , ) self.out_projs.append(__lowerCamelCase) else: self.out_projs.append(__lowerCamelCase) SCREAMING_SNAKE_CASE_ : List[Any] = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__lowerCamelCase , name=F'out_layers_._{i}_._weight' , ) SCREAMING_SNAKE_CASE_ : List[Any] = self.add_weight( shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__lowerCamelCase , name=F'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias)) else: for i in range(len(self.cutoffs)): SCREAMING_SNAKE_CASE_ : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] SCREAMING_SNAKE_CASE_ : Tuple = self.d_embed // (self.div_val**i) SCREAMING_SNAKE_CASE_ : Tuple = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__lowerCamelCase , name=F'out_projs_._{i}') self.out_projs.append(__lowerCamelCase) SCREAMING_SNAKE_CASE_ : Optional[int] = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__lowerCamelCase , name=F'out_layers_._{i}_._weight' , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.add_weight( shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__lowerCamelCase , name=F'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias)) super().build(__lowerCamelCase) @staticmethod def _SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict=None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = x if proj is not None: SCREAMING_SNAKE_CASE_ : Tuple = tf.einsum('''ibd,ed->ibe''' , __lowerCamelCase , __lowerCamelCase) return tf.einsum('''ibd,nd->ibn''' , __lowerCamelCase , __lowerCamelCase) + b @staticmethod def _SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = 
shape_list(__lowerCamelCase) SCREAMING_SNAKE_CASE_ : Dict = tf.range(lp_size[0] , dtype=target.dtype) SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.stack([r, target] , 1) return tf.gather_nd(__lowerCamelCase , __lowerCamelCase) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=False): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = 0 if self.n_clusters == 0: SCREAMING_SNAKE_CASE_ : List[str] = self._logit(__lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0]) if target is not None: SCREAMING_SNAKE_CASE_ : Optional[int] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowerCamelCase , logits=__lowerCamelCase) SCREAMING_SNAKE_CASE_ : Dict = tf.nn.log_softmax(__lowerCamelCase , axis=-1) else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = shape_list(__lowerCamelCase) SCREAMING_SNAKE_CASE_ : Optional[int] = [] SCREAMING_SNAKE_CASE_ : Tuple = tf.zeros(hidden_sizes[:2]) for i in range(len(self.cutoffs)): SCREAMING_SNAKE_CASE_ : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: SCREAMING_SNAKE_CASE_ : Tuple = (target >= l_idx) & (target < r_idx) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.where(__lowerCamelCase) SCREAMING_SNAKE_CASE_ : Tuple = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase) - l_idx if self.div_val == 1: SCREAMING_SNAKE_CASE_ : Optional[Any] = self.out_layers[0][0][l_idx:r_idx] SCREAMING_SNAKE_CASE_ : Optional[int] = self.out_layers[0][1][l_idx:r_idx] else: SCREAMING_SNAKE_CASE_ : Optional[int] = self.out_layers[i][0] SCREAMING_SNAKE_CASE_ : int = self.out_layers[i][1] if i == 0: SCREAMING_SNAKE_CASE_ : str = tf.concat([cur_W, self.cluster_weight] , 0) SCREAMING_SNAKE_CASE_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[0]) SCREAMING_SNAKE_CASE_ : List[str] = tf.nn.log_softmax(__lowerCamelCase) out.append(head_logprob[..., : self.cutoffs[0]]) if target is not None: SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase) SCREAMING_SNAKE_CASE_ : str = self._gather_logprob(__lowerCamelCase , __lowerCamelCase) else: SCREAMING_SNAKE_CASE_ : str = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[i]) SCREAMING_SNAKE_CASE_ : Dict = tf.nn.log_softmax(__lowerCamelCase) SCREAMING_SNAKE_CASE_ : str = self.cutoffs[0] + i - 1 # No probability for the head cluster SCREAMING_SNAKE_CASE_ : Optional[Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(__lowerCamelCase) if target is not None: SCREAMING_SNAKE_CASE_ : List[Any] = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase) SCREAMING_SNAKE_CASE_ : Any = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase) SCREAMING_SNAKE_CASE_ : Dict = self._gather_logprob(__lowerCamelCase , __lowerCamelCase) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(__lowerCamelCase , -cur_logprob , shape_list(__lowerCamelCase)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.concat(__lowerCamelCase , axis=-1) if target is not None: if return_mean: SCREAMING_SNAKE_CASE_ : List[Any] = tf.reduce_mean(__lowerCamelCase) # Add the training-time loss value to the layer using `self.add_loss()`. 
self.add_loss(__lowerCamelCase) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference). self.add_metric(__lowerCamelCase , name=self.name , aggregation='''mean''' if return_mean else '''''') return out
368
"""simple docstring""" import argparse import os import re import packaging.version UpperCAmelCase_ : Any = """examples/""" UpperCAmelCase_ : Optional[int] = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCAmelCase_ : List[Any] = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCAmelCase_ : Optional[int] = """README.md""" def _A (__a , __a , __a ) -> int: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = REPLACE_PATTERNS[pattern] SCREAMING_SNAKE_CASE_ : Optional[int] = replace.replace('''VERSION''' , __a ) SCREAMING_SNAKE_CASE_ : Tuple = re_pattern.sub(__a , __a ) with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(__a ) def _A (__a ) -> int: """simple docstring""" for folder, directories, fnames in os.walk(__a ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__a , __a ) , __a , pattern='''examples''' ) def _A (__a , __a=False ) -> List[str]: """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__a , __a , __a ) if not patch: update_version_in_examples(__a ) def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' SCREAMING_SNAKE_CASE_ : Optional[int] = '''1. Want to contribute a new model?''' with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Tuple = f.readlines() # Find the start of the list. SCREAMING_SNAKE_CASE_ : Tuple = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Dict = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): SCREAMING_SNAKE_CASE_ : List[Any] = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(__a , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(__a ) def _A () -> List[str]: """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: SCREAMING_SNAKE_CASE_ : Any = f.read() SCREAMING_SNAKE_CASE_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0] return packaging.version.parse(__a ) def _A (__a=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: SCREAMING_SNAKE_CASE_ : List[Any] = default_version.base_version elif patch: SCREAMING_SNAKE_CASE_ : int = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: SCREAMING_SNAKE_CASE_ : Any = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are you releasing? [{default_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[Any] = default_version print(f'Updating version to {version}.' ) global_version_update(__a , patch=__a ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _A () -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = get_version() SCREAMING_SNAKE_CASE_ : Any = f'{current_version.major}.{current_version.minor + 1}.0.dev0' SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_version.base_version # Check with the user we got that right. SCREAMING_SNAKE_CASE_ : int = input(f'Which version are we developing now? [{dev_version}]' ) if len(__a ) == 0: SCREAMING_SNAKE_CASE_ : Optional[int] = dev_version print(f'Updating version to {version}.' ) global_version_update(__a ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCAmelCase_ : int = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
318
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase_ : int = 16 UpperCAmelCase_ : Optional[int] = 32 def _A (__a , __a = 16 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' ) SCREAMING_SNAKE_CASE_ : List[str] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__a ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=a__ , max_length=a__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE_ : List[str] = datasets.map( a__ , batched=a__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE_ : List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__a ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE_ : int = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE_ : int = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE_ : List[str] = 8 else: SCREAMING_SNAKE_CASE_ : List[Any] = None return tokenizer.pad( a__ , padding='''longest''' , max_length=a__ , pad_to_multiple_of=a__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=a__ , collate_fn=a__ , batch_size=a__ ) SCREAMING_SNAKE_CASE_ : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=a__ , collate_fn=a__ , batch_size=a__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase_ : str = mocked_dataloaders # noqa: F811 def _A (__a , __a ) -> int: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , a__ ) == "1": SCREAMING_SNAKE_CASE_ : Optional[int] = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: SCREAMING_SNAKE_CASE_ : int = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: SCREAMING_SNAKE_CASE_ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE_ : List[str] = config['''lr'''] SCREAMING_SNAKE_CASE_ : Optional[int] = int(config['''num_epochs'''] ) SCREAMING_SNAKE_CASE_ : Optional[int] = int(config['''seed'''] ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(config['''batch_size'''] ) set_seed(a__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = get_dataloaders(a__ , a__ ) SCREAMING_SNAKE_CASE_ : Tuple = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation SCREAMING_SNAKE_CASE_ : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: SCREAMING_SNAKE_CASE_ : Dict = batch_size // MAX_GPU_BATCH_SIZE SCREAMING_SNAKE_CASE_ : int = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=a__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE_ : List[Any] = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE_ : Optional[int] = AdamW(params=model.parameters() , lr=a__ ) # Instantiate scheduler SCREAMING_SNAKE_CASE_ : Any = get_linear_schedule_with_warmup( optimizer=a__ , num_warmup_steps=1_00 , num_training_steps=(len(a__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.prepare( a__ , a__ , a__ , a__ , a__ ) # New Code # # We need to initialize the trackers we use. 
# Overall configurations can also be stored
if args.with_tracking: SCREAMING_SNAKE_CASE_ : List[Any] = os.path.split(a__ )[-1].split('''.''' )[0] accelerator.init_trackers(a__ , a__ ) # Now we train the model for epoch in range(a__ ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 for step, batch in enumerate(a__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) SCREAMING_SNAKE_CASE_ : List[str] = model(**a__ ) SCREAMING_SNAKE_CASE_ : int = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() SCREAMING_SNAKE_CASE_ : Optional[Any] = loss / gradient_accumulation_steps accelerator.backward(a__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(a__ ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = model(**a__ ) SCREAMING_SNAKE_CASE_ : str = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=a__ , references=a__ , ) SCREAMING_SNAKE_CASE_ : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , a__ ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(a__ ), '''epoch''': epoch, } , step=a__ , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def _A () -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=a__ , default=a__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10''' ''' and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=a__ , default='''logs''' , help='''Location to store experiment tracking logs and relevant project information''' , ) SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args() SCREAMING_SNAKE_CASE_ : Tuple = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(a__ , a__ ) if __name__ == "__main__": main()
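The tracking-specific additions in the script above reduce to four calls. A minimal sketch, assuming a recent accelerate release with at least one tracker (e.g. tensorboard) installed; the project name and logged values are illustrative:
from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")  # pick up every tracker available in the environment
accelerator.init_trackers("mrpc_tracking_demo", config={"lr": 2e-5, "num_epochs": 3})  # store run hyperparameters
accelerator.log({"train_loss": 0.5, "epoch": 0}, step=0)  # values must be str/int/float, or dicts of str -> float/int
accelerator.end_training()  # flush and close all open trackers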
369
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _A (__a , __a , __a=1e-12 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T SCREAMING_SNAKE_CASE_ : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T return jnp.matmul(__a , norm_emb_a.T ) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' __UpperCamelCase = 42 __UpperCamelCase = jnp.floataa def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxCLIPVisionModule(self.config.vision_config) SCREAMING_SNAKE_CASE_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=lowercase_ , dtype=self.dtype) SCREAMING_SNAKE_CASE_ : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param( '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim)) SCREAMING_SNAKE_CASE_ : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,)) def __call__( self : Optional[Any] , lowercase_ : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.vision_model(lowercase_)[1] SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.visual_projection(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.special_care_embeds) SCREAMING_SNAKE_CASE_ : List[str] = jax_cosine_distance(lowercase_ , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs SCREAMING_SNAKE_CASE_ : Tuple = 0.0 SCREAMING_SNAKE_CASE_ : Dict = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : List[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowercase_) # Use a lower threshold if an image has any special care concept SCREAMING_SNAKE_CASE_ : Dict = is_special_care * 0.01 SCREAMING_SNAKE_CASE_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment SCREAMING_SNAKE_CASE_ : Any = jnp.round(lowercase_ , 3) SCREAMING_SNAKE_CASE_ : Dict = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = CLIPConfig __UpperCamelCase = "clip_input" __UpperCamelCase = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Union[str, Any] , lowercase_ : CLIPConfig , lowercase_ : Optional[Tuple] = None , lowercase_ : int = 0 , lowercase_ : jnp.dtype = jnp.floataa , lowercase_ : bool = True , **lowercase_ : Any , ): '''simple docstring''' if input_shape is None: SCREAMING_SNAKE_CASE_ : List[str] = (1, 224, 224, 3) SCREAMING_SNAKE_CASE_ : List[Any] = self.module_class(config=lowercase_ , dtype=lowercase_ , **lowercase_) super().__init__(lowercase_ , lowercase_ , input_shape=lowercase_ , seed=lowercase_ , dtype=lowercase_ , _do_init=_do_init) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : jax.random.KeyArray , 
lowercase_ : Tuple , lowercase_ : FrozenDict = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = jax.random.normal(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.random.split(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = {'''params''': params_rng, '''dropout''': dropout_rng} SCREAMING_SNAKE_CASE_ : List[Any] = self.module.init(lowercase_ , lowercase_)['''params'''] return random_params def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : dict = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = jnp.transpose(lowercase_ , (0, 2, 3, 1)) return self.module.apply( {'''params''': params or self.params} , jnp.array(lowercase_ , dtype=jnp.floataa) , rngs={} , )
318
0
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : int = 16 , lowercase_ : int = 88 , lowercase_ : Optional[int] = None , lowercase_ : int = 1 , lowercase_ : float = 0.0 , lowercase_ : int = 32 , lowercase_ : Optional[int] = None , lowercase_ : bool = False , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "geglu" , lowercase_ : Optional[int] = None , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : int = nn.ModuleList( [ TransformeraDModel( num_attention_heads=lowercase_ , attention_head_dim=lowercase_ , in_channels=lowercase_ , num_layers=lowercase_ , dropout=lowercase_ , norm_num_groups=lowercase_ , cross_attention_dim=lowercase_ , attention_bias=lowercase_ , sample_size=lowercase_ , num_vector_embeds=lowercase_ , activation_fn=lowercase_ , num_embeds_ada_norm=lowercase_ , ) for _ in range(2) ]) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference SCREAMING_SNAKE_CASE_ : str = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` SCREAMING_SNAKE_CASE_ : Any = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` SCREAMING_SNAKE_CASE_ : List[str] = [1, 0] def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : List[Any]=None , lowercase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_states SCREAMING_SNAKE_CASE_ : List[str] = [] SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 # attention_mask is not used yet for i in range(2): # for each of the two transformers, pass the corresponding condition tokens SCREAMING_SNAKE_CASE_ : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] SCREAMING_SNAKE_CASE_ : int = self.transformer_index_for_condition[i] SCREAMING_SNAKE_CASE_ : List[Any] = self.transformers[transformer_index]( lowercase_ , encoder_hidden_states=lowercase_ , timestep=lowercase_ , cross_attention_kwargs=lowercase_ , return_dict=lowercase_ , )[0] encoded_states.append(encoded_state - input_states) tokens_start += self.condition_lengths[i] SCREAMING_SNAKE_CASE_ : List[str] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) SCREAMING_SNAKE_CASE_ : Any = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=lowercase_)
370
"""simple docstring""" from __future__ import annotations import queue class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Tuple , lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = data SCREAMING_SNAKE_CASE_ : Tuple = None SCREAMING_SNAKE_CASE_ : Dict = None def _A () -> TreeNode: """simple docstring""" print('''\n********Press N to stop entering at any point of time********\n''' ) SCREAMING_SNAKE_CASE_ : List[Any] = input('''Enter the value of the root node: ''' ).strip().lower() SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE_ : Union[str, Any] = TreeNode(int(__a ) ) q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Optional[int] = q.get() SCREAMING_SNAKE_CASE_ : List[str] = f'Enter the left node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : Optional[int] = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : List[str] = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = left_node q.put(__a ) SCREAMING_SNAKE_CASE_ : str = f'Enter the right node of {node_found.data}: ' SCREAMING_SNAKE_CASE_ : str = input(__a ).strip().lower() or '''n''' if check == "n": return tree_node SCREAMING_SNAKE_CASE_ : Any = TreeNode(int(__a ) ) SCREAMING_SNAKE_CASE_ : int = right_node q.put(__a ) raise def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return print(node.data , end=''',''' ) pre_order(node.left ) pre_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return in_order(node.left ) print(node.data , end=''',''' ) in_order(node.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=''',''' ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : Tuple = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : queue.Queue = queue.Queue() q.put(__a ) while not q.empty(): SCREAMING_SNAKE_CASE_ : str = [] while not q.empty(): SCREAMING_SNAKE_CASE_ : List[str] = q.get() print(node_dequeued.data , end=''',''' ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__a ) def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Union[str, Any] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=''',''' ) stack.append(__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE_ : Tuple = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE_ : str = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ : list[TreeNode] = [] SCREAMING_SNAKE_CASE_ : Any = node while n or stack: while n: stack.append(__a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.left SCREAMING_SNAKE_CASE_ : Any = stack.pop() 
print(n.data , end=''',''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = n.right def _A (__a ) -> None: """simple docstring""" if not isinstance(__a , __a ) or not node: return SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = [], [] SCREAMING_SNAKE_CASE_ : List[Any] = node stacka.append(__a ) while stacka: # to find the reversed order of post order, store it in stack2 SCREAMING_SNAKE_CASE_ : List[str] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__a ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=''',''' ) def _A (__a = "" , __a=50 , __a="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 ) return f'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase_ : TreeNode = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
318
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = StableDiffusionXLImgaImgPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} __UpperCamelCase = PipelineTesterMixin.required_optional_params - {"latents"} __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=_SCREAMING_SNAKE_CASE , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) SCREAMING_SNAKE_CASE_ : str = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , ) SCREAMING_SNAKE_CASE_ : Any = CLIPTextModel(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : List[str] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Tuple , lowercase_ : Optional[Any]=0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = floats_tensor((1, 3, 32, 32) , 
rng=random.Random(_SCREAMING_SNAKE_CASE)).to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Optional[int] = image / 2 + 0.5 if str(_SCREAMING_SNAKE_CASE).startswith('''mps'''): SCREAMING_SNAKE_CASE_ : Any = torch.manual_seed(_SCREAMING_SNAKE_CASE) else: SCREAMING_SNAKE_CASE_ : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE).manual_seed(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.75, } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components() SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionXLImgaImgPipeline(**_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Tuple = sd_pipe.to(_SCREAMING_SNAKE_CASE) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : str = sd_pipe(**_SCREAMING_SNAKE_CASE).images SCREAMING_SNAKE_CASE_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionXLImgaImgPipeline(**_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Optional[int] = sd_pipe.to(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) # forward without prompt embeds SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Union[str, Any] = 3 * ['''this is a negative prompt'''] SCREAMING_SNAKE_CASE_ : List[str] = negative_prompt SCREAMING_SNAKE_CASE_ : Dict = 3 * [inputs['''prompt''']] SCREAMING_SNAKE_CASE_ : str = sd_pipe(**_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Any = output.images[0, -3:, -3:, -1] # forward with prompt embeds SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : List[Any] = 3 * ['''this is a negative prompt'''] SCREAMING_SNAKE_CASE_ : Tuple = 3 * [inputs.pop('''prompt''')] ( SCREAMING_SNAKE_CASE_ ) : Dict = sd_pipe.encode_prompt(_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : str = sd_pipe( **_SCREAMING_SNAKE_CASE , prompt_embeds=_SCREAMING_SNAKE_CASE , negative_prompt_embeds=_SCREAMING_SNAKE_CASE , pooled_prompt_embeds=_SCREAMING_SNAKE_CASE , negative_pooled_prompt_embeds=_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ : Dict = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4 @slow 
@require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int="cpu" , lowercase_ : Union[str, Any]=torch.floataa , lowercase_ : Any=0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE).manual_seed(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : str = np.random.RandomState(_SCREAMING_SNAKE_CASE).standard_normal((1, 4, 64, 64)) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.from_numpy(_SCREAMING_SNAKE_CASE).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Any = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''') pipe.to(_SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_inputs(_SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE).images SCREAMING_SNAKE_CASE_ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06]) assert np.abs(image_slice - expected_slice).max() < 7e-3
371
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx" def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Union[str, Any]=0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowercase_)) SCREAMING_SNAKE_CASE_ : List[str] = torch.manual_seed(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Dict = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Any = np.array( [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Tuple = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Tuple = np.array( [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 
0.76_94_25_15]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') SCREAMING_SNAKE_CASE_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs() SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**lowercase_).images SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array( [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = ort.SessionOptions() SCREAMING_SNAKE_CASE_ : Optional[int] = False return options def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : List[Any] = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Optional[int] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : int = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert 
np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') SCREAMING_SNAKE_CASE_ : Tuple = init_image.resize((128, 128)) SCREAMING_SNAKE_CASE_ : Tuple = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''') SCREAMING_SNAKE_CASE_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowercase_) SCREAMING_SNAKE_CASE_ : int = '''A fantasy landscape, trending on artstation''' SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(0) SCREAMING_SNAKE_CASE_ : int = pipe( prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , ) SCREAMING_SNAKE_CASE_ : Optional[int] = output.images SCREAMING_SNAKE_CASE_ : Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_ : List[str] = np.array( [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
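# A brief usage sketch of the scheduler-swapping these tests exercise: the ONNX
# x4 upscale pipeline accepts any compatible scheduler rebuilt from its own config.
# The local image path below is a placeholder assumption.
from PIL import Image
from diffusers import EulerDiscreteScheduler, OnnxStableDiffusionUpscalePipeline

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
# Swap in Euler while keeping the checkpoint's timestep configuration.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
low_res = Image.open("input.png").resize((128, 128))  # hypothetical input file
upscaled = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=low_res,
    num_inference_steps=10,
).images[0]  # 128x128 in, 512x512 out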
318
0
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class lowerCAmelCase__ ( a_ ): '''simple docstring''' def __init__( self : Any , lowercase_ : Any = "▁" , lowercase_ : Optional[Any] = True , lowercase_ : Optional[int] = "<unk>" , lowercase_ : Union[str, Any] = "</s>" , lowercase_ : int = "<pad>" , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = { '''pad''': {'''id''': 0, '''token''': pad_token}, '''eos''': {'''id''': 1, '''token''': eos_token}, '''unk''': {'''id''': 2, '''token''': unk_token}, } SCREAMING_SNAKE_CASE_ : int = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): SCREAMING_SNAKE_CASE_ : List[Any] = token_dict['''token'''] SCREAMING_SNAKE_CASE_ : Dict = Tokenizer(Unigram()) SCREAMING_SNAKE_CASE_ : List[Any] = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(''' {2,}''') , ''' '''), normalizers.Lowercase(), ]) SCREAMING_SNAKE_CASE_ : List[str] = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_), pre_tokenizers.Digits(individual_digits=lowercase_), pre_tokenizers.Punctuation(), ]) SCREAMING_SNAKE_CASE_ : int = decoders.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = TemplateProcessing( single=F'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , ) SCREAMING_SNAKE_CASE_ : Dict = { '''model''': '''SentencePieceUnigram''', '''replacement''': replacement, '''add_prefix_space''': add_prefix_space, } super().__init__(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Tuple , lowercase_ : List[str] = 8000 , lowercase_ : Union[str, Any] = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = trainers.UnigramTrainer( vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , ) if isinstance(lowercase_ , lowercase_): SCREAMING_SNAKE_CASE_ : Optional[Any] = [files] self._tokenizer.train(lowercase_ , trainer=lowercase_) self.add_unk_id() def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Tuple , lowercase_ : Optional[int] = 8000 , lowercase_ : str = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = trainers.UnigramTrainer( vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , ) self._tokenizer.train_from_iterator(lowercase_ , trainer=lowercase_) self.add_unk_id() def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = json.loads(self._tokenizer.to_str()) SCREAMING_SNAKE_CASE_ : str = self.special_tokens['''unk''']['''id'''] SCREAMING_SNAKE_CASE_ : Dict = Tokenizer.from_str(json.dumps(lowercase_))
350
"""simple docstring""" from scipy.stats import pearsonr import datasets UpperCAmelCase_ : List[Any] = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ UpperCAmelCase_ : Optional[int] = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ UpperCAmelCase_ : Tuple = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=False): '''simple docstring''' if return_pvalue: SCREAMING_SNAKE_CASE_ : int = pearsonr(lowercase_ , lowercase_) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowercase_ , lowercase_)[0])}
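# A small sanity sketch: the metric above simply wraps scipy.stats.pearsonr, so a
# direct scipy call reproduces the docstring example exactly.
from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2))  # -0.74, as in the docstring
print(round(p, 2))  # 0.15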
318
0
"""simple docstring""" import numpy as np import datasets UpperCAmelCase_ : List[Any] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ UpperCAmelCase_ : Any = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ UpperCAmelCase_ : Union[str, Any] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''') , id='''X'''), }) , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Any , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array(_snake_case) SCREAMING_SNAKE_CASE_ : Tuple = np.array(_snake_case) # Assert that arrays are 2D if len(X.shape) != 2: raise ValueError('''Expected `X` to be a 2D vector''') if len(reference_distribution.shape) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''') if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''') # Get mahalanobis distance for each prediction SCREAMING_SNAKE_CASE_ : Optional[Any] = X - np.mean(_snake_case) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.cov(reference_distribution.T) try: SCREAMING_SNAKE_CASE_ : Optional[Any] = np.linalg.inv(_snake_case) except np.linalg.LinAlgError: SCREAMING_SNAKE_CASE_ : str = np.linalg.pinv(_snake_case) SCREAMING_SNAKE_CASE_ : Any = np.dot(_snake_case , _snake_case) SCREAMING_SNAKE_CASE_ : Tuple = np.dot(_snake_case , X_minus_mu.T).diagonal() return {"mahalanobis": mahal_dist}
351
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCAmelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = pad_token_id SCREAMING_SNAKE_CASE_ : Optional[int] = max_length SCREAMING_SNAKE_CASE_ : Dict = vocab SCREAMING_SNAKE_CASE_ : Dict = merges SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab() return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_) return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]): '''simple docstring''' return cls(**lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs( lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id) return {"attention_mask": attention_mask, "input_ids": input_ids}
318
0
"""simple docstring""" def _A (__a = 1_00 ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = (n * (n + 1) // 2) ** 2 SCREAMING_SNAKE_CASE_ : int = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(f'''{solution() = }''')
352
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) SCREAMING_SNAKE_CASE_ : Dict = { '''do_resize''': True, '''size''': {'''height''': 224, '''width''': 224}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], '''do_convert_rgb''': True, } SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname , lowercase_) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowercase_ : str): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowercase_ : List[Any]): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowercase_ : str): '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_slow.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_) SCREAMING_SNAKE_CASE_ : Any = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) processor_fast.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , 
tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , lowercase_) self.assertIsInstance(processor_fast.tokenizer , lowercase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , lowercase_) self.assertIsInstance(processor_fast.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''') SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_image_processor(do_normalize=lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowercase_) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , lowercase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowercase_) def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Any = image_processor(lowercase_ , return_tensors='''np''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowercase_ , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : str = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Dict = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(lowercase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : int = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : int = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values''']) # test if it raises when no input is passed with pytest.raises(lowercase_): processor() def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 
self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : Optional[int] = processor.batch_decode(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.batch_decode(lowercase_) self.assertListEqual(lowercase_ , lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.get_image_processor() SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Dict = ChineseCLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = '''Alexandra,T-shirt的价格是15便士。''' SCREAMING_SNAKE_CASE_ : Dict = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Dict = processor(text=lowercase_ , images=lowercase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
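# A brief usage sketch of the processor pattern these tests exercise: a single
# ChineseCLIPProcessor call tokenizes the text and preprocesses the images in one
# batch. The checkpoint id is an assumption for illustration.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids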
318
0