Dataset schema:
  code                     string   (length 82 – 54.1k)
  code_codestyle           int64    (0 – 699)
  style_context            string   (length 111 – 35.6k)
  style_context_codestyle  int64    (0 – 699)
  label                    int64    (0 – 1)
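The rows below appear to interleave the string columns (`code`, `style_context`) with their integer codestyle scores and the binary `label`. A minimal sketch, assuming the data is published as a Hugging Face dataset loadable with the `datasets` library (the repository id below is hypothetical and used only for illustration), of how rows with this schema could be loaded and inspected:

```python
from datasets import load_dataset

# NOTE: "user/code-style-pairs" is a placeholder id; substitute the
# actual dataset repository name.
dataset = load_dataset("user/code-style-pairs", split="train")

# Each row pairs a code snippet with a style context and a binary label.
for row in dataset.select(range(3)):
    print(
        f"codestyle={row['code_codestyle']}, "
        f"context_codestyle={row['style_context_codestyle']}, "
        f"label={row['label']}"
    )
    print(row["code"][:200])  # first 200 characters of the snippet
```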
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def _snake_case ( A ) -> Optional[Any]: return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class a__ ( a__ ): '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = parser.add_parser('''download''' ) download_parser.add_argument( '''--cache-dir''' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='''Path to location to store the models''' ) download_parser.add_argument( '''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' ) download_parser.add_argument( '''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , ) download_parser.add_argument('''model''' , type=lowerCamelCase_ , help='''Name of the model to download''' ) download_parser.set_defaults(func=lowerCamelCase_ ) def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]: lowerCAmelCase__ = model lowerCAmelCase__ = cache lowerCAmelCase__ = force lowerCAmelCase__ = trust_remote_code def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
90
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a__ ( a__ ): '''simple docstring''' lowercase__ : torch.FloatTensor class a__ ( a__ , a__ ): '''simple docstring''' @register_to_config def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]: super().__init__() # pass init params to Encoder lowerCAmelCase__ = Encoder( in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , ) lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 ) lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ ) lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 ) # pass init params to Decoder lowerCAmelCase__ = Decoder( in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , ) @apply_forward_hook def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput: lowerCAmelCase__ = self.encoder(lowerCamelCase_ ) lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCamelCase_ ) @apply_forward_hook def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ ) else: lowerCAmelCase__ = h lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ ) lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: lowerCAmelCase__ = sample lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase_ )
90
1
'''simple docstring''' import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict __UpperCAmelCase = namedtuple( '''_TestCommandArgs''', [ '''dataset''', '''name''', '''cache_dir''', '''data_dir''', '''all_configs''', '''save_infos''', '''ignore_verifications''', '''force_redownload''', '''clear_cache''', ], defaults=[None, None, None, False, False, False, False, False], ) def _snake_case ( A , A ) -> str: return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def _snake_case ( A ) -> List[str]: lowerCAmelCase__ = _TestCommandArgs(dataset=A , all_configs=A , save_infos=A ) lowerCAmelCase__ = TestCommand(*A ) test_command.run() lowerCAmelCase__ = os.path.join(A , '''README.md''' ) assert os.path.exists(A ) lowerCAmelCase__ = DatasetInfosDict.from_directory(A ) lowerCAmelCase__ = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) , splits=[ { '''name''': '''train''', '''num_bytes''': 2351563, '''num_examples''': 10000, }, { '''name''': '''validation''', '''num_bytes''': 238418, '''num_examples''': 1000, }, ] , download_size=3940680 , dataset_size=2589981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCAmelCase__ , lowerCAmelCase__ = getattr(dataset_infos['''default'''] , A ), getattr(expected_dataset_infos['''default'''] , A ) if key == "num_bytes": assert is_apercent_close(A , A ) elif key == "splits": assert list(A ) == list(A ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
90
'''simple docstring''' from __future__ import annotations __UpperCAmelCase = list[list[int]] # assigning initial values to the grid __UpperCAmelCase = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __UpperCAmelCase = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _snake_case ( A , A , A , A ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _snake_case ( A ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _snake_case ( A ) -> Matrix | None: if location := find_empty_location(A ): lowerCAmelCase__ , lowerCAmelCase__ = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(A , A , A , A ): lowerCAmelCase__ = digit if sudoku(A ) is not None: return grid lowerCAmelCase__ = 0 return None def _snake_case ( A ) -> None: for row in grid: for cell in row: print(A , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 20) print_solution(example_grid) print('''\nExample grid solution:''') __UpperCAmelCase = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
90
1
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=99 , lowerCamelCase_=0 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_="last" , lowerCamelCase_=True , lowerCamelCase_=None , lowerCamelCase_=0 , ) -> List[str]: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_input_lengths lowerCAmelCase__ = use_token_type_ids lowerCAmelCase__ = use_labels lowerCAmelCase__ = gelu_activation lowerCAmelCase__ = sinusoidal_embeddings lowerCAmelCase__ = causal lowerCAmelCase__ = asm lowerCAmelCase__ = n_langs lowerCAmelCase__ = vocab_size lowerCAmelCase__ = n_special lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = num_labels lowerCAmelCase__ = num_choices lowerCAmelCase__ = summary_type lowerCAmelCase__ = use_proj lowerCAmelCase__ = scope lowerCAmelCase__ = bos_token_id def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ = None if self.use_input_lengths: lowerCAmelCase__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCAmelCase__ = None if self.use_token_type_ids: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ = ids_tensor([self.batch_size] , 2 ).float() lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , 
emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]: lowerCAmelCase__ = XLMModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , lengths=lowerCamelCase_ , langs=lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ , langs=lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[Any]: lowerCAmelCase__ = XLMWithLMHeadModel(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Dict: lowerCAmelCase__ = XLMForQuestionAnsweringSimple(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ ) lowerCAmelCase__ = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> str: lowerCAmelCase__ = XLMForQuestionAnswering(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ ) lowerCAmelCase__ = model( lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , p_mask=lowerCamelCase_ , ) lowerCAmelCase__ = model( lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , ) ((lowerCAmelCase__) , ) = result_with_labels.to_tuple() lowerCAmelCase__ = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ ) ((lowerCAmelCase__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) 
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Optional[int]: lowerCAmelCase__ = XLMForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]: lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = XLMForTokenClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]: lowerCAmelCase__ = self.num_choices lowerCAmelCase__ = XLMForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) = config_and_inputs lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class a__ ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : List[Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) lowercase__ : int = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase__ : Tuple = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": 
XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> List[str]: lowerCAmelCase__ = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowerCAmelCase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ ) lowerCAmelCase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ ) return inputs_dict def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = XLMModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , emb_dim=37 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=1 ) -> Optional[int]: self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual( [isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCamelCase_ ) ) self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(lowerCamelCase_ ): # adds PAD dummy token lowerCAmelCase__ = min_length + idx + 1 lowerCAmelCase__ = min_length + idx + 1 lowerCAmelCase__ = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, 
src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase_ ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=1 ) -> int: self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual( [isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase_ ) , ) self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCamelCase_ ): # adds PAD dummy token lowerCAmelCase__ = min_length + idx + 1 lowerCAmelCase__ = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase_ ) , ) pass @slow def __SCREAMING_SNAKE_CASE ( self ) -> str: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = XLMModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch class a__ ( unittest.TestCase ): '''simple docstring''' @slow def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(lowerCamelCase_ ) lowerCAmelCase__ = torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCamelCase_ ) # the president lowerCAmelCase__ = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowerCAmelCase__ = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase_ )
90
'''simple docstring''' def _snake_case ( A ) -> int: if n == 1 or not isinstance(A , A ): return 0 elif n == 2: return 1 else: lowerCAmelCase__ = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _snake_case ( A ) -> int: lowerCAmelCase__ = 0 lowerCAmelCase__ = 2 while digits < n: index += 1 lowerCAmelCase__ = len(str(fibonacci(A ) ) ) return index def _snake_case ( A = 1000 ) -> int: return fibonacci_digits_index(A ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
90
1
'''simple docstring''' def _snake_case ( A ) -> int: if not isinstance(A , A ): raise TypeError('''only integers accepted as input''' ) else: lowerCAmelCase__ = str(abs(A ) ) lowerCAmelCase__ = [list(A ) for char in range(len(A ) )] for index in range(len(A ) ): num_transpositions[index].pop(A ) return max( int(''''''.join(list(A ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
90
'''simple docstring''' from __future__ import annotations from random import choice def _snake_case ( A ) -> int: return choice(A ) def _snake_case ( A , A ) -> int: lowerCAmelCase__ = random_pivot(A ) # partition based on pivot # linear time lowerCAmelCase__ = [e for e in lst if e < pivot] lowerCAmelCase__ = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(A ) == k - 1: return pivot # pivot is in elements bigger than k elif len(A ) < k - 1: return kth_number(A , k - len(A ) - 1 ) # pivot is in elements smaller than k else: return kth_number(A , A ) if __name__ == "__main__": import doctest doctest.testmod()
90
1
'''simple docstring''' from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __UpperCAmelCase = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class a__ ( a__ ): '''simple docstring''' def __init__( self , lowerCamelCase_ = 1_01 ) -> Union[str, Any]: lowerCAmelCase__ = length def __len__( self ) -> Any: return self.length def __getitem__( self , lowerCamelCase_ ) -> int: return i class a__ : '''simple docstring''' def __call__( self , lowerCamelCase_ ) -> Any: return {"input_ids": torch.tensor(lowerCamelCase_ ), "labels": torch.tensor(lowerCamelCase_ )} class a__ ( nn.Module ): '''simple docstring''' def __init__( self ) -> List[str]: super().__init__() # Add some (unused) params otherwise DDP will complain. lowerCAmelCase__ = nn.Linear(1_20 , 80 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None ) -> List[Any]: if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class a__ ( a__ ): '''simple docstring''' @require_torch_neuroncore def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = F"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F"""--output_dir {output_dir}""".split() lowerCAmelCase__ = ['''torchrun'''] + distributed_args + args execute_subprocess_async(lowerCamelCase_ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class a__ ( a__ ): '''simple docstring''' @require_torch_multi_gpu def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = F"""--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F"""--output_dir {output_dir}""".split() lowerCAmelCase__ = ['''torchrun'''] + distributed_args + args execute_subprocess_async(lowerCamelCase_ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __UpperCAmelCase = HfArgumentParser((TrainingArguments,)) __UpperCAmelCase = parser.parse_args_into_dataclasses()[0] logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """ f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}""" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: __UpperCAmelCase = DummyDataset(dataset_length) def _snake_case ( A ) -> Dict: lowerCAmelCase__ = list(range(len(A ) ) ) lowerCAmelCase__ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( '''Predictions and/or labels do not match expected results:\n - predictions: ''' F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" ) return {"success": success} __UpperCAmelCase = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __UpperCAmelCase = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __UpperCAmelCase = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __UpperCAmelCase = 2 __UpperCAmelCase = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __UpperCAmelCase = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __UpperCAmelCase = None
90
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
90
1
'''simple docstring''' import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class a__ ( a__ ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]: with open(lowerCamelCase_ , encoding='''utf-8''' ) as input_file: lowerCAmelCase__ = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' ) lowerCAmelCase__ = input_file.read() lowerCAmelCase__ = regexp.search(lowerCamelCase_ ) return match def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[Any]: with open(lowerCamelCase_ , encoding='''utf-8''' ) as input_file: lowerCAmelCase__ = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL ) lowerCAmelCase__ = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` lowerCAmelCase__ = regexp.finditer(lowerCamelCase_ ) lowerCAmelCase__ = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = Path('''./datasets''' ) lowerCAmelCase__ = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(lowerCamelCase_ ) ): raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = Path('''./datasets''' ) lowerCAmelCase__ = list(dataset_paths.absolute().glob('''**/*.py''' ) ) for dataset in dataset_files: if self._no_print_statements(str(lowerCamelCase_ ) ): raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
90
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __UpperCAmelCase = TypeVar('''KEY''') __UpperCAmelCase = TypeVar('''VAL''') @dataclass(frozen=a__ , slots=a__ ) class a__ ( Generic[KEY, VAL] ): '''simple docstring''' lowercase__ : KEY lowercase__ : VAL class a__ ( _Item ): '''simple docstring''' def __init__( self ) -> None: super().__init__(lowerCamelCase_ , lowerCamelCase_ ) def __bool__( self ) -> bool: return False __UpperCAmelCase = _DeletedItem() class a__ ( MutableMapping[KEY, VAL] ): '''simple docstring''' def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None: lowerCAmelCase__ = initial_block_size lowerCAmelCase__ = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ = capacity_factor lowerCAmelCase__ = 0 def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return hash(lowerCamelCase_ ) % len(self._buckets ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return (ind + 1) % len(self._buckets ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool: lowerCAmelCase__ = self._buckets[ind] if not stored: lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ ) return True else: return False def __SCREAMING_SNAKE_CASE ( self ) -> bool: lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None: lowerCAmelCase__ = self._buckets lowerCAmelCase__ = [None] * new_size lowerCAmelCase__ = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __SCREAMING_SNAKE_CASE ( self ) -> None: self._resize(len(self._buckets ) * 2 ) def __SCREAMING_SNAKE_CASE ( self ) -> None: self._resize(len(self._buckets ) // 2 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]: lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None: for ind in self._iterate_buckets(lowerCamelCase_ ): if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): break def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None: if self._is_full(): self._size_up() self._add_item(lowerCamelCase_ , lowerCamelCase_ ) def __delitem__( self , lowerCamelCase_ ) -> None: for ind in self._iterate_buckets(lowerCamelCase_ ): lowerCAmelCase__ = self._buckets[ind] if item is None: raise KeyError(lowerCamelCase_ ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , lowerCamelCase_ ) -> VAL: for ind in self._iterate_buckets(lowerCamelCase_ ): lowerCAmelCase__ = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCamelCase_ ) def __len__( self ) -> int: return self._len def __iter__( self ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: 
lowerCAmelCase__ = ''' ,'''.join( F"""{item.key}: {item.val}""" for item in self._buckets if item ) return F"""HashMap({val_string})"""
90
1
'''simple docstring''' from math import isqrt def _snake_case ( A ) -> list[int]: lowerCAmelCase__ = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , A , A ): lowerCAmelCase__ = False return [i for i in range(2 , A ) if is_prime[i]] def _snake_case ( A = 10**8 ) -> int: lowerCAmelCase__ = calculate_prime_numbers(max_number // 2 ) lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = len(A ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f"""{solution() = }""")
90
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def _snake_case ( A , A , A ) -> Union[str, Any]: lowerCAmelCase__ = OmegaConf.load(A ) lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model'''] lowerCAmelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCAmelCase__ = {} lowerCAmelCase__ = '''first_stage_model.''' for key in keys: if key.startswith(A ): lowerCAmelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCAmelCase__ = {} lowerCAmelCase__ = '''model.diffusion_model.''' for key in keys: if key.startswith(A ): lowerCAmelCase__ = state_dict[key] lowerCAmelCase__ = config.model.params.first_stage_config.params lowerCAmelCase__ = config.model.params.unet_config.params lowerCAmelCase__ = VQModel(**A ).eval() vqvae.load_state_dict(A ) lowerCAmelCase__ = UNetLDMModel(**A ).eval() unet.load_state_dict(A ) lowerCAmelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , ) lowerCAmelCase__ = LDMPipeline(A , A , A ) pipeline.save_pretrained(A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', type=str, required=True) parser.add_argument('''--config_path''', type=str, required=True) parser.add_argument('''--output_path''', type=str, required=True) __UpperCAmelCase = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
90
1
'''simple docstring''' from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass __UpperCAmelCase = (3, 9, -11, 0, 7, 5, 1, -1) __UpperCAmelCase = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class a__ : '''simple docstring''' lowercase__ : int lowercase__ : Node | None class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_ ) -> None: lowerCAmelCase__ = None for i in sorted(lowerCamelCase_ , reverse=lowerCamelCase_ ): lowerCAmelCase__ = Node(lowerCamelCase_ , self.head ) def __iter__( self ) -> Iterator[int]: lowerCAmelCase__ = self.head while node: yield node.data lowerCAmelCase__ = node.next_node def __len__( self ) -> int: return sum(1 for _ in self ) def __str__( self ) -> str: return " -> ".join([str(lowerCamelCase_ ) for node in self] ) def _snake_case ( A , A ) -> SortedLinkedList: return SortedLinkedList(list(A ) + list(A ) ) if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
90
'''simple docstring''' import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __UpperCAmelCase = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class a__ ( a__ ): '''simple docstring''' lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase__ : bool = field( default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase__ : Optional[int] = field( default=a__ , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase__ : Optional[int] = field( default=a__ , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field( default=a__ , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = super().to_dict() for k, v in d.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = v.to_dict() return d
90
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class a__ ( a__ ): '''simple docstring''' lowercase__ : Union[str, Any] = "mobilenet_v1" def __init__( self , lowerCamelCase_=3 , lowerCamelCase_=2_24 , lowerCamelCase_=1.0 , lowerCamelCase_=8 , lowerCamelCase_="relu6" , lowerCamelCase_=True , lowerCamelCase_=0.999 , lowerCamelCase_=0.02 , lowerCamelCase_=0.001 , **lowerCamelCase_ , ) -> List[str]: super().__init__(**lowerCamelCase_ ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) lowerCAmelCase__ = num_channels lowerCAmelCase__ = image_size lowerCAmelCase__ = depth_multiplier lowerCAmelCase__ = min_depth lowerCAmelCase__ = hidden_act lowerCAmelCase__ = tf_padding lowerCAmelCase__ = classifier_dropout_prob lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps class a__ ( a__ ): '''simple docstring''' lowercase__ : Optional[int] = version.parse("1.11" ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def __SCREAMING_SNAKE_CASE ( self ) -> float: return 1e-4
90
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device __UpperCAmelCase = False class a__ ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = generator.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''cyberpunk 2077''' lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 lowerCAmelCase__ = '''A painting of a squirrel eating a burger ''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.text_to_image( prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images 
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
90
1
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class a__ ( a__ ): '''simple docstring''' def __init__( self , lowerCamelCase_=0.01 , lowerCamelCase_=10_00 ) -> Union[str, Any]: lowerCAmelCase__ = p_stop lowerCAmelCase__ = max_length def __iter__( self ) -> Any: lowerCAmelCase__ = 0 lowerCAmelCase__ = False while not stop and count < self.max_length: yield count count += 1 lowerCAmelCase__ = random.random() < self.p_stop class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True ) -> Optional[Any]: lowerCAmelCase__ = [ BatchSamplerShard(lowerCamelCase_ , 2 , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) for i in range(2 ) ] lowerCAmelCase__ = [list(lowerCamelCase_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(lowerCamelCase_ ) for shard in batch_sampler_shards] , [len(lowerCamelCase_ ) for e in expected] ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: # Check the shards when the dataset is a round multiple of total batch size. lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) # Check the shards when the dataset is very small. lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [[], []] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: # Check the shards when the dataset is a round multiple of batch size. lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ ) # Check the shards when the dataset is not a round multiple of batch size. lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ ) # Check the shards when the dataset is very small. 
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [[], []] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: # Check the shards when the dataset is a round multiple of total batch size. lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) # Check the shards when the dataset is very small. 
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [[[0, 1]], []] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [[], []] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: # Check the shards when the dataset is a round multiple of batch size. lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) # Check the shards when the dataset is not a round multiple of batch size. lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) # Check the shards when the dataset is very small. 
lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [[[0, 1]], []] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) lowerCAmelCase__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = [[], []] self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] lowerCAmelCase__ = [BatchSamplerShard(lowerCamelCase_ , 2 , lowerCamelCase_ , even_batches=lowerCamelCase_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=2 , lowerCamelCase_=False ) -> str: random.seed(lowerCamelCase_ ) lowerCAmelCase__ = list(lowerCamelCase_ ) lowerCAmelCase__ = [ IterableDatasetShard( lowerCamelCase_ , batch_size=lowerCamelCase_ , drop_last=lowerCamelCase_ , num_processes=lowerCamelCase_ , process_index=lowerCamelCase_ , split_batches=lowerCamelCase_ , ) for i in range(lowerCamelCase_ ) ] lowerCAmelCase__ = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(lowerCamelCase_ ) iterable_dataset_lists.append(list(lowerCamelCase_ ) ) lowerCAmelCase__ = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size lowerCAmelCase__ = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) self.assertTrue(len(lowerCamelCase_ ) % shard_batch_size == 0 ) lowerCAmelCase__ = [] for idx in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(lowerCamelCase_ ) < len(lowerCamelCase_ ): reference += reference self.assertListEqual(lowerCamelCase_ , reference[: len(lowerCamelCase_ )] ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = 42 lowerCAmelCase__ = RandomIterableDataset() self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ ) self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ ) self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ ) self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ ) # Edge case with a very small dataset lowerCAmelCase__ = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ ) self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , 
drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ ) self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ ) self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCamelCase_ ) lowerCAmelCase__ = SkipBatchSampler(lowerCamelCase_ , 2 ) self.assertListEqual(list(lowerCamelCase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = DataLoader(list(range(16 ) ) , batch_size=4 ) lowerCAmelCase__ = skip_first_batches(lowerCamelCase_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(lowerCamelCase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowerCamelCase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def __SCREAMING_SNAKE_CASE ( self ) -> str: Accelerator() lowerCAmelCase__ = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(lowerCamelCase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowerCamelCase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
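To make the sharding behaviour pinned down by these assertions concrete, here is a minimal usage sketch. It assumes `BatchSamplerShard` is importable from `accelerate.data_loader`, the module these tests exercise; everything else is standard PyTorch.

from torch.utils.data import BatchSampler

from accelerate.data_loader import BatchSamplerShard

# Two processes share one sampler; each shard yields every other batch.
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]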
"""
Pandigital multiples (Project Euler style): find the largest 1-9 pandigital
9-digit number formed as the concatenated product of an integer with (1, 2)
or (1, 2, 3).
"""
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if `n` uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    # For a 4-digit base b (5000-9999), 2*b has 5 digits, so the
    # concatenation of b and 2*b equals b * 100002 (9 digits in total).
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    # For a 3-digit base b (100-333), 2*b and 3*b are 3 digits each, so the
    # concatenation of b, 2*b and 3*b equals b * 1002003.
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None


if __name__ == "__main__":
    print(f"{solution() = }")
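A quick sanity check of the concatenation identity the search relies on:

# b * 100002 is b followed by 2 * b when 2 * b has five digits.
assert 9327 * 100002 == int(str(9327) + str(2 * 9327))
assert is_9_pandigital(9327 * 100002)  # 932718654 uses each digit 1-9 once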
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class a__ ( a__ , a__ ): '''simple docstring''' lowercase__ : str = "focalnet" def __init__( self , lowerCamelCase_=2_24 , lowerCamelCase_=4 , lowerCamelCase_=3 , lowerCamelCase_=96 , lowerCamelCase_=False , lowerCamelCase_=[1_92, 3_84, 7_68, 7_68] , lowerCamelCase_=[2, 2, 6, 2] , lowerCamelCase_=[2, 2, 2, 2] , lowerCamelCase_=[3, 3, 3, 3] , lowerCamelCase_="gelu" , lowerCamelCase_=4.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_=False , lowerCamelCase_=1e-4 , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_=32 , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ , ) -> str: super().__init__(**lowerCamelCase_ ) lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = embed_dim lowerCAmelCase__ = use_conv_embed lowerCAmelCase__ = hidden_sizes lowerCAmelCase__ = depths lowerCAmelCase__ = focal_levels lowerCAmelCase__ = focal_windows lowerCAmelCase__ = hidden_act lowerCAmelCase__ = mlp_ratio lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = drop_path_rate lowerCAmelCase__ = use_layerscale lowerCAmelCase__ = layerscale_value lowerCAmelCase__ = use_post_layernorm lowerCAmelCase__ = use_post_layernorm_in_modulation lowerCAmelCase__ = normalize_modulator lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = encoder_stride lowerCAmelCase__ = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
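A minimal instantiation sketch for the configuration above, assuming the class is exported as `transformers.FocalNetConfig`; only parameters visible in the signature are used.

from transformers import FocalNetConfig

config = FocalNetConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
print(config.hidden_sizes)  # [192, 384, 768, 768], the signature's default
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']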
'''simple docstring''' # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES __UpperCAmelCase = '''tiny-wmt19-en-ru''' # Build # borrowed from a test __UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] __UpperCAmelCase = dict(zip(vocab, range(len(vocab)))) __UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase = Path(tmpdirname) __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file'''] __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file'''] __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file'''] with open(src_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, '''w''') as fp: fp.write('''\n'''.join(merges)) __UpperCAmelCase = FSMTTokenizer( langs=['''en''', '''ru'''], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) __UpperCAmelCase = FSMTConfig( langs=['''ru''', '''en'''], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) __UpperCAmelCase = FSMTForConditionalGeneration(config) print(f"""num of params {tiny_model.num_parameters()}""") # Test __UpperCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''') __UpperCAmelCase = tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
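After the script runs, the tiny checkpoint can be reloaded like any other FSMT model; a sketch assuming the `tiny-wmt19-en-ru` directory created above:

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-ru")
print(model.num_parameters())  # a few thousand parameters, ~60KB of files on disk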
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class a__ ( a__ ): '''simple docstring''' def __init__( self , lowerCamelCase_="" , lowerCamelCase_="train" ) -> Optional[int]: assert os.path.isdir(lowerCamelCase_ ) lowerCAmelCase__ = [] lowerCAmelCase__ = os.listdir(lowerCamelCase_ ) for story_filename in story_filenames_list: if "summary" in story_filename: continue lowerCAmelCase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) if not os.path.isfile(lowerCamelCase_ ): continue self.documents.append(lowerCamelCase_ ) def __len__( self ) -> List[Any]: return len(self.documents ) def __getitem__( self , lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = self.documents[idx] lowerCAmelCase__ = document_path.split('''/''' )[-1] with open(lowerCamelCase_ , encoding='''utf-8''' ) as source: lowerCAmelCase__ = source.read() lowerCAmelCase__ , lowerCAmelCase__ = process_story(lowerCamelCase_ ) return document_name, story_lines, summary_lines def _snake_case ( A ) -> Optional[int]: lowerCAmelCase__ = list(filter(lambda A : len(A ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) ) # for some unknown reason some lines miss a period, add it lowerCAmelCase__ = [_add_missing_period(A ) for line in nonempty_lines] # gather article lines lowerCAmelCase__ = [] lowerCAmelCase__ = deque(A ) while True: try: lowerCAmelCase__ = lines.popleft() if element.startswith('''@highlight''' ): break story_lines.append(A ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines lowerCAmelCase__ = list(filter(lambda A : not t.startswith('''@highlight''' ) , A ) ) return story_lines, summary_lines def _snake_case ( A ) -> Dict: lowerCAmelCase__ = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')'''] if line.startswith('''@highlight''' ): return line if line[-1] in END_TOKENS: return line return line + "." def _snake_case ( A , A , A ) -> str: if len(A ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(A )) ) return sequence def _snake_case ( A , A ) -> str: lowerCAmelCase__ = torch.ones_like(A ) lowerCAmelCase__ = sequence == pad_token_id lowerCAmelCase__ = 0 return mask def _snake_case ( A , A , A ) -> Dict: lowerCAmelCase__ = [tokenizer.encode(A ) for line in story_lines] lowerCAmelCase__ = [token for sentence in story_lines_token_ids for token in sentence] lowerCAmelCase__ = [tokenizer.encode(A ) for line in summary_lines] lowerCAmelCase__ = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def _snake_case ( A , A ) -> List[Any]: lowerCAmelCase__ = [] for sequence in batch: lowerCAmelCase__ = -1 lowerCAmelCase__ = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(A ) return torch.tensor(A )
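A small end-to-end illustration of `process_story` on a made-up CNN/DailyMail-style story string:

raw_story = (
    "It was widely reported\n"
    "\n"
    "The fox jumped over the dog\n"
    "@highlight\n"
    "Fox jumps dog\n"
)
story_lines, summary_lines = process_story(raw_story)
print(story_lines)    # ['It was widely reported.', 'The fox jumped over the dog.']
print(summary_lines)  # ['Fox jumps dog.']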
"""
Tests for accelerate's memory utilities: `find_executable_batch_size`
(halves the batch size on CUDA OOM until the wrapped function succeeds)
and `release_memory`.
"""
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
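In real training code the decorator is used the same way as in these mocks; a minimal sketch of the intended shape, where `build_model` and `train_one_epoch` are hypothetical placeholders for your own code:

from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    # Everything that depends on the batch size is rebuilt inside the
    # function: on CUDA OOM the decorator clears memory, halves batch_size,
    # and calls the function again.
    model = build_model()               # hypothetical placeholder
    train_one_epoch(model, batch_size)  # hypothetical placeholder


train()  # called with no batch_size argument; the decorator injects it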
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=64 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> List[Any]: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_input_mask lowerCAmelCase__ = use_token_type_ids lowerCAmelCase__ = use_labels lowerCAmelCase__ = vocab_size lowerCAmelCase__ = hidden_size lowerCAmelCase__ = embedding_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_vocab_size lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = num_labels lowerCAmelCase__ = num_choices lowerCAmelCase__ = scope def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ = None if self.use_input_mask: lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ = None if self.use_token_type_ids: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __SCREAMING_SNAKE_CASE ( self ) -> Any: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any: lowerCAmelCase__ = MegatronBertModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]: lowerCAmelCase__ = MegatronBertForMaskedLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any: lowerCAmelCase__ = MegatronBertForCausalLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int: lowerCAmelCase__ = MegatronBertForNextSentencePrediction(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int: lowerCAmelCase__ = MegatronBertForPreTraining(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , next_sentence_label=lowerCamelCase_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = MegatronBertForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]: lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = MegatronBertForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]: lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = MegatronBertForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any: lowerCAmelCase__ = self.num_choices lowerCAmelCase__ = MegatronBertForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) = config_and_inputs lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class a__ ( a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Any = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) lowercase__ : Union[str, Any] = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) lowercase__ : Dict = True # test_resize_embeddings = False lowercase__ : Optional[Any] = False def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , 
lowerCamelCase_ , lowerCamelCase_=False ) -> Optional[int]: lowerCAmelCase__ = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) if return_labels: if model_class in get_values(lowerCamelCase_ ): lowerCAmelCase__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ ) lowerCAmelCase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ ) return inputs_dict def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = MegatronBertModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCamelCase_ ) def _snake_case ( A ) -> Any: return torch.tensor( A , dtype=torch.long , device=A , ) __UpperCAmelCase = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip('''Model is not available.''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: lowerCAmelCase__ = os.path.join(os.environ['''MYDIR'''] , lowerCamelCase_ ) lowerCAmelCase__ = MegatronBertModel.from_pretrained(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.half() lowerCAmelCase__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): lowerCAmelCase__ = model(lowerCamelCase_ )[0] lowerCAmelCase__ = torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , lowerCamelCase_ ) lowerCAmelCase__ = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728] for ii in range(3 ): for jj in range(3 ): lowerCAmelCase__ = output[0, ii, jj] lowerCAmelCase__ = expected[3 * ii + jj] lowerCAmelCase__ = '''ii={} jj={} a={} 
b={}'''.format(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) self.assertTrue(math.isclose(lowerCamelCase_ , lowerCamelCase_ , rel_tol=lowerCamelCase_ , abs_tol=lowerCamelCase_ ) , msg=lowerCamelCase_ )
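A hedged sketch of running the model family under test with a tiny configuration, using `MegatronBertConfig` and `MegatronBertModel` from the imports above:

import torch
from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = MegatronBertModel(config).eval()
input_ids = torch.randint(0, 99, (2, 7))
with torch.no_grad():
    outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([2, 7, 32])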
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __UpperCAmelCase = logging.getLogger(__name__) def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]: lowerCAmelCase__ = bnb_quantization_config.load_in_abit lowerCAmelCase__ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) lowerCAmelCase__ = [] # custom device map if isinstance(A , A ) and len(device_map.keys() ) > 1: lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCAmelCase__ = get_keys_to_not_convert(A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A ) lowerCAmelCase__ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCAmelCase__ = [] lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A ) # compatibility with peft lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = get_parameter_device(A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A ) # convert param to the right dtype lowerCAmelCase__ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) lowerCAmelCase__ = getattr(A , A , A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A ): param.to(A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F"""The model device type is {model_device.type}. 
However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCAmelCase__ = replace_with_bnb_layers( A , A , modules_to_not_convert=A ) lowerCAmelCase__ = get_quantized_model_device_map( A , A , A , max_memory=A , no_split_module_classes=A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCAmelCase__ = True lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A , device_map=A , offload_dir=A ) def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]: if device_map is None: if torch.cuda.is_available(): lowerCAmelCase__ = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(A , A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) lowerCAmelCase__ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCAmelCase__ = {} lowerCAmelCase__ = special_dtypes lowerCAmelCase__ = no_split_module_classes lowerCAmelCase__ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCAmelCase__ = get_balanced_memory( A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , ) lowerCAmelCase__ = max_memory lowerCAmelCase__ = infer_auto_device_map(A , **A ) if isinstance(A , A ): # check if don't have any quantized module on the cpu lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCAmelCase__ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _snake_case ( A , A , A=None , A=None ) -> Any: if modules_to_not_convert is None: lowerCAmelCase__ = [] lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]: lowerCAmelCase__ = False for name, module in model.named_children(): if current_key_name is None: lowerCAmelCase__ = [] current_key_name.append(A ) if isinstance(A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCAmelCase__ = '''.'''.join(A ) lowerCAmelCase__ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCAmelCase__ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) lowerCAmelCase__ = module.weight.data if module.bias is not None: lowerCAmelCase__ = module.bias.data bnb_module.requires_grad_(A ) setattr(A , A , A ) lowerCAmelCase__ = True if len(list(module.children() ) ) > 0: lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) lowerCAmelCase__ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case ( A ) -> Tuple: # Create a copy of the model with init_empty_weights(): lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCAmelCase__ = find_tied_parameters(A ) # For compatibility with Accelerate < 0.18 if isinstance(A , A ): lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCAmelCase__ = sum(A , [] ) lowerCAmelCase__ = len(A ) > 0 # Check if it is a base model lowerCAmelCase__ = False if hasattr(A , '''base_model_prefix''' ): lowerCAmelCase__ = not hasattr(A , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCAmelCase__ = list(model.named_children() ) lowerCAmelCase__ = [list_modules[-1][0]] # add last module together with tied weights lowerCAmelCase__ = set(A ) - set(A ) lowerCAmelCase__ = list(set(A ) ) + list(A ) # remove ".weight" from the keys lowerCAmelCase__ = ['''.weight''', '''.bias'''] lowerCAmelCase__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCAmelCase__ = name.replace(A , '''''' ) filtered_module_names.append(A ) return filtered_module_names def _snake_case ( A ) -> Optional[int]: for m in model.modules(): if isinstance(A , bnb.nn.Linearabit ): return True return False def _snake_case ( A ) -> Union[str, Any]: return next(parameter.parameters() ).device def _snake_case ( A , A , A , A , A , A , A ) -> Any: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(A , A , 0 , dtype=A , value=A ) lowerCAmelCase__ = param_name lowerCAmelCase__ = model if "." in tensor_name: lowerCAmelCase__ = tensor_name.split('''.''' ) for split in splits[:-1]: lowerCAmelCase__ = getattr(A , A ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCAmelCase__ = new_module lowerCAmelCase__ = splits[-1] # offload weights lowerCAmelCase__ = False offload_weight(module._parameters[tensor_name] , A , A , index=A ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , ) else: offload_weight(A , A , A , index=A ) offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A ) set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
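A hedged sketch of the quantization entry point defined above (accelerate exports it as `load_and_quantize_model`); the checkpoint path is a placeholder and the config fields are the common bitsandbytes ones:

import torch.nn as nn

from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)

# Instantiate the skeleton on the meta device, as the warning above recommends.
with init_empty_weights():
    model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 4))

quantized = load_and_quantize_model(
    model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",  # placeholder: folder holding the weights
    device_map="auto",
)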
'''simple docstring''' from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run __UpperCAmelCase = True except (ImportError, AttributeError): __UpperCAmelCase = object def _snake_case ( *A , **A ) -> str: pass __UpperCAmelCase = False __UpperCAmelCase = logging.get_logger('''transformers-cli/serving''') def _snake_case ( A ) -> Optional[int]: lowerCAmelCase__ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(A , args.host , args.port , args.workers ) class a__ ( a__ ): '''simple docstring''' lowercase__ : dict class a__ ( a__ ): '''simple docstring''' lowercase__ : List[str] lowercase__ : Optional[List[int]] class a__ ( a__ ): '''simple docstring''' lowercase__ : str class a__ ( a__ ): '''simple docstring''' lowercase__ : Any class a__ ( a__ ): '''simple docstring''' @staticmethod def __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ) -> List[str]: lowerCAmelCase__ = parser.add_parser( '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' ) serve_parser.add_argument( '''--task''' , type=lowerCamelCase_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , ) serve_parser.add_argument('''--host''' , type=lowerCamelCase_ , default='''localhost''' , help='''Interface the server will listen on.''' ) serve_parser.add_argument('''--port''' , type=lowerCamelCase_ , default=88_88 , help='''Port the serving will listen to.''' ) serve_parser.add_argument('''--workers''' , type=lowerCamelCase_ , default=1 , help='''Number of http workers''' ) serve_parser.add_argument('''--model''' , type=lowerCamelCase_ , help='''Model\'s name or path to stored model.''' ) serve_parser.add_argument('''--config''' , type=lowerCamelCase_ , help='''Model\'s config name or path to stored model.''' ) serve_parser.add_argument('''--tokenizer''' , type=lowerCamelCase_ , help='''Tokenizer name to use.''' ) serve_parser.add_argument( '''--device''' , type=lowerCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) serve_parser.set_defaults(func=lowerCamelCase_ ) def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = pipeline lowerCAmelCase__ = host lowerCAmelCase__ = port lowerCAmelCase__ = workers if not _serve_dependencies_installed: raise RuntimeError( '''Using serve command requires FastAPI and uvicorn. 
''' '''Please install transformers with [serving]: pip install "transformers[serving]".''' '''Or install FastAPI and uvicorn separately.''' ) else: logger.info(F"""Serving model over {host}:{port}""" ) lowerCAmelCase__ = FastAPI( routes=[ APIRoute( '''/''' , self.model_info , response_model=lowerCamelCase_ , response_class=lowerCamelCase_ , methods=['''GET'''] , ), APIRoute( '''/tokenize''' , self.tokenize , response_model=lowerCamelCase_ , response_class=lowerCamelCase_ , methods=['''POST'''] , ), APIRoute( '''/detokenize''' , self.detokenize , response_model=lowerCamelCase_ , response_class=lowerCamelCase_ , methods=['''POST'''] , ), APIRoute( '''/forward''' , self.forward , response_model=lowerCamelCase_ , response_class=lowerCamelCase_ , methods=['''POST'''] , ), ] , timeout=6_00 , ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: run(self._app , host=self.host , port=self.port , workers=self.workers ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) ) -> List[Any]: try: lowerCAmelCase__ = self._pipeline.tokenizer.tokenize(lowerCamelCase_ ) if return_ids: lowerCAmelCase__ = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) return ServeTokenizeResult(tokens=lowerCamelCase_ , tokens_ids=lowerCamelCase_ ) else: return ServeTokenizeResult(tokens=lowerCamelCase_ ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(lowerCamelCase_ )} ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) , lowerCamelCase_ = Body(lowerCamelCase_ , embed=lowerCamelCase_ ) , ) -> Optional[Any]: try: lowerCAmelCase__ = self._pipeline.tokenizer.decode(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return ServeDeTokenizeResult(model='''''' , text=lowerCamelCase_ ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(lowerCamelCase_ )} ) async def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=Body(lowerCamelCase_ , embed=lowerCamelCase_ ) ) -> List[str]: # Check we don't have empty string if len(lowerCamelCase_ ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model lowerCAmelCase__ = self._pipeline(lowerCamelCase_ ) return ServeForwardResult(output=lowerCamelCase_ ) except Exception as e: raise HTTPException(5_00 , {'''error''': str(lowerCamelCase_ )} )
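Once serving, the routes registered above can be exercised over plain HTTP; a sketch using `requests`, where the JSON field names (`text_input`, `return_ids`) are an assumption since the handler signatures are anonymized here:

import requests

response = requests.post(
    "http://localhost:8888/tokenize",  # default host/port from the arguments above
    json={"text_input": "Hello world", "return_ids": True},  # assumed field names
)
print(response.json())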
"""
Heun's method (explicit trapezoidal rule): a second-order ODE integrator
that corrects an explicit Euler predictor with the average of the slopes
at both ends of the step.
"""
from collections.abc import Callable

import numpy as np


def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Integrate dy/dx = ode_func(x, y) from x0 to x_end, starting at y0."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: one explicit Euler step.
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at x and x + h.
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1])
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
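Checking the integrator on dy/dx = y, whose exact solution is exp(x):

import numpy as np

y = heun_method(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1], np.exp(1.0))  # second-order accurate, so y[-1] is close to e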
'''simple docstring''' import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : List[str] = XLMTokenizer lowercase__ : Optional[int] = False def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase__ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowerCAmelCase__ = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) lowerCAmelCase__ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(lowerCamelCase_ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(lowerCamelCase_ ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[Any]: lowerCAmelCase__ = '''lower newer''' lowerCAmelCase__ = '''lower newer''' return input_text, output_text def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = XLMTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase__ = '''lower''' lowerCAmelCase__ = ['''low''', '''er</w>'''] lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = tokens + ['''<unk>'''] lowerCAmelCase__ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' ) lowerCAmelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
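The toy vocabulary encodes a standard BPE setup; inside the test class the first unit test reduces to this sketch (the file paths come from `setUp`):

tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
print(tokenizer.tokenize("lower"))  # ['low', 'er</w>'] via the learned merges
print(tokenizer.convert_tokens_to_ids(["low", "er</w>", "<unk>"]))  # [14, 15, 20]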
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict: lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Any: return self.length def __getitem__( self , lowerCamelCase_ ) -> List[str]: return {"x": self.x[i], "y": self.y[i]} class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _snake_case ( A , A = 16 ) -> Any: from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} lowerCAmelCase__ = load_dataset('''csv''' , data_files=A ) lowerCAmelCase__ = datasets['''train'''].unique('''label''' ) lowerCAmelCase__ = {v: i for i, v in enumerate(A )} def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowerCAmelCase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 ) return train_dataloader, eval_dataloader
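A sketch of the synthetic regression helpers above; the class names `RegressionDataset` and `RegressionModel` are assumptions (the definitions are anonymized here), but the behaviour follows the code:

import torch
from torch.utils.data import DataLoader

dataset = RegressionDataset()   # 64 points of y = a*x + b + noise by default
loader = DataLoader(dataset, batch_size=8)
model = RegressionModel()       # the second nn.Module defined above
batch = next(iter(loader))
print(model(batch["x"]).shape)  # torch.Size([8])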
90
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = '''laion/clap-htsat-unfused''' lowerCAmelCase__ = tempfile.mkdtemp() def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> Optional[int]: return RobertaTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> Optional[int]: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: shutil.rmtree(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = self.get_feature_extractor() lowerCAmelCase__ = ClapProcessor(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCamelCase_ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCAmelCase__ = self.get_feature_extractor(do_normalize=lowerCamelCase_ , padding_value=1.0 ) lowerCAmelCase__ = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCamelCase_ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.get_feature_extractor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = ClapProcessor(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) lowerCAmelCase__ = floats_list((3, 10_00) ) lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ) lowerCAmelCase__ = processor(audios=lowerCamelCase_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.get_feature_extractor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = ClapProcessor(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) lowerCAmelCase__ = '''This is a test string''' lowerCAmelCase__ = processor(text=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer(lowerCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: 
lowerCAmelCase__ = self.get_feature_extractor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = ClapProcessor(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase__ = processor.batch_decode(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.batch_decode(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = self.get_feature_extractor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = ClapProcessor(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
90
'''simple docstring'''

import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
90
1
'''simple docstring'''

import os


def solution():
    # Sum the numbers in num.txt and return the first ten digits of the total.
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
90
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]: lowerCAmelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), ('''vqa_classifier.0.bias''', 
'''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def _snake_case ( A , A ) -> List[str]: for i in range(config.num_hidden_layers ): lowerCAmelCase__ = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" ) lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase__ = in_proj_bias[: config.hidden_size] lowerCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ = in_proj_bias[-config.hidden_size :] def _snake_case ( A ) -> List[str]: lowerCAmelCase__ = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(A , A ) def _snake_case ( A , A , A ) -> str: lowerCAmelCase__ = dct.pop(A ) lowerCAmelCase__ = val @torch.no_grad() def _snake_case ( A , A ) -> Any: lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False if "vqa" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 3129 lowerCAmelCase__ = '''huggingface/label-files''' lowerCAmelCase__ = '''vqa2-id2label.json''' lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} lowerCAmelCase__ = ViltForQuestionAnswering(A ) elif "nlvr" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 2 lowerCAmelCase__ = {0: '''False''', 1: '''True'''} lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()} lowerCAmelCase__ = 3 lowerCAmelCase__ = ViltForImagesAndTextClassification(A ) elif "irtr" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = ViltForImageAndTextRetrieval(A ) elif "mlm_itm" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = ViltForMaskedLM(A ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict'''] lowerCAmelCase__ = create_rename_keys(A , A , A , A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_q_k_v(A , A ) if mlm_model or irtr_model: lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] for k in ignore_keys: state_dict.pop(A , A ) # load state dict into HuggingFace model model.eval() if mlm_model: lowerCAmelCase__ , lowerCAmelCase__ = 
model.load_state_dict(A , strict=A ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(A ) # Define processor lowerCAmelCase__ = ViltImageProcessor(size=384 ) lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' ) lowerCAmelCase__ = ViltProcessor(A , A ) # Forward pass on example inputs (image + text) if nlvr_model: lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw ) lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw ) lowerCAmelCase__ = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw ) if mlm_model: lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].''' else: lowerCAmelCase__ = '''How many cats are there?''' lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = model(**A ) # Verify outputs if mlm_model: lowerCAmelCase__ = torch.Size([1, 11, 30522] ) lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 ) # verify masked token prediction equals "cats" lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: lowerCAmelCase__ = torch.Size([1, 3129] ) lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 ) # verify vqa prediction equals "2" lowerCAmelCase__ = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: lowerCAmelCase__ = torch.Size([1, 2] ) lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(A ).mkdir(exist_ok=A ) print(F"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) processor.save_pretrained(A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __UpperCAmelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
90
1
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
90
'''simple docstring'''

import re


def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
90
1
'''simple docstring'''


def actual_power(a: int, b: int) -> int:
    # Divide and conquer: a^b = a^(b//2) * a^(b//2), with an extra factor of a when b is odd.
    # int(b / 2) truncates toward zero, so the recursion also terminates for negative b.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
90
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''', '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''', }, } __UpperCAmelCase = { '''facebook/esm2_t6_8M_UR50D''': 1_024, '''facebook/esm2_t12_35M_UR50D''': 1_024, } def _snake_case ( A ) -> Optional[Any]: with open(A , '''r''' ) as f: lowerCAmelCase__ = f.read().splitlines() return [l.strip() for l in lines] class a__ ( a__ ): '''simple docstring''' lowercase__ : Optional[Any] = VOCAB_FILES_NAMES lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple: super().__init__(**lowerCamelCase_ ) lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ ) lowerCAmelCase__ = dict(enumerate(self.all_tokens ) ) lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )} lowerCAmelCase__ = unk_token lowerCAmelCase__ = cls_token lowerCAmelCase__ = pad_token lowerCAmelCase__ = mask_token lowerCAmelCase__ = eos_token lowerCAmelCase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]: return text.split() def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict: return len(self._id_to_token ) def __SCREAMING_SNAKE_CASE ( self ) -> int: return {token: i for i, token in enumerate(self.all_tokens )} def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]: lowerCAmelCase__ = [self.cls_token_id] lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 
for token in token_ids_a] lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1] if token_ids_a is not None: mask += [0] * len(lowerCamelCase_ ) + [1] return mask def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]: lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(lowerCamelCase_ , '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return self.get_vocab_size(with_added_tokens=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int: return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ )
90
1
'''simple docstring'''

import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    # DDIMScheduler takes the training step count as `num_train_timesteps`
    noise_scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
90
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a__ ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = AltDiffusionPipeline lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS def __SCREAMING_SNAKE_CASE ( self ) -> str: torch.manual_seed(0 ) lowerCAmelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowerCAmelCase__ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) lowerCAmelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , ) lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ ) lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCAmelCase__ = 77 lowerCAmelCase__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]: if str(lowerCamelCase_ ).startswith('''mps''' ): lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ ) else: lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) lowerCAmelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: 
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = '''A photo of an astronaut''' lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # make sure here that pndm scheduler skips prk lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 
5_12, 3) lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
90
1
'''simple docstring'''

import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
90
'''simple docstring'''


def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    # Smallest number evenly divisible by all of 1..n is the lcm of the range.
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
90
1
'''simple docstring'''

import math
import sys


def read_file_binary(file_path: str) -> str:
    # Read the file as bytes and return them as one long bit string.
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    # LZW-style decompression of the given bit string.
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    # Strip the size prefix a compressed file carries.
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
90
'''simple docstring'''

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    # Removes the usage line from the help message for subcommands.
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
90
1
'''simple docstring'''

from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    # Resolve a force given as (magnitude, angle) into rectangular components.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    # The system is in equilibrium when the summation of moments is (close to) zero.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
90
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a__ ( a__ ): '''simple docstring''' lowercase__ : torch.FloatTensor class a__ ( a__ , a__ ): '''simple docstring''' @register_to_config def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]: super().__init__() # pass init params to Encoder lowerCAmelCase__ = Encoder( in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , ) lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 ) lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ ) lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 ) # pass init params to Decoder lowerCAmelCase__ = Decoder( in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , ) @apply_forward_hook def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput: lowerCAmelCase__ = self.encoder(lowerCamelCase_ ) lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCamelCase_ ) @apply_forward_hook def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ ) else: lowerCAmelCase__ = h lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ ) lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: lowerCAmelCase__ = sample lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase_ )
90
1
'''simple docstring'''

from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
90
'''simple docstring''' from __future__ import annotations __UpperCAmelCase = list[list[int]] # assigning initial values to the grid __UpperCAmelCase = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __UpperCAmelCase = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _snake_case ( A , A , A , A ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _snake_case ( A ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _snake_case ( A ) -> Matrix | None: if location := find_empty_location(A ): lowerCAmelCase__ , lowerCAmelCase__ = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(A , A , A , A ): lowerCAmelCase__ = digit if sudoku(A ) is not None: return grid lowerCAmelCase__ = 0 return None def _snake_case ( A ) -> None: for row in grid: for cell in row: print(A , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 20) print_solution(example_grid) print('''\nExample grid solution:''') __UpperCAmelCase = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
90
1
'''simple docstring''' from __future__ import annotations __UpperCAmelCase = list[list[int]] # assigning initial values to the grid __UpperCAmelCase = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __UpperCAmelCase = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _snake_case ( A , A , A , A ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _snake_case ( A ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _snake_case ( A ) -> Matrix | None: if location := find_empty_location(A ): lowerCAmelCase__ , lowerCAmelCase__ = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(A , A , A , A ): lowerCAmelCase__ = digit if sudoku(A ) is not None: return grid lowerCAmelCase__ = 0 return None def _snake_case ( A ) -> None: for row in grid: for cell in row: print(A , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 20) print_solution(example_grid) print('''\nExample grid solution:''') __UpperCAmelCase = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
90
'''simple docstring'''


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    # Index of the first Fibonacci number with n digits.
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
90
1
'''simple docstring'''

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
90
'''simple docstring'''

from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    # Quickselect: find the kth smallest element in expected linear time.
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
90
1
'''simple docstring'''

import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
90
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
90
1
'''simple docstring'''

import collections
import gzip
import os
import urllib

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated


__UpperCAmelCase = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__UpperCAmelCase = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''


def _snake_case ( A ) -> List[str]:
    lowerCAmelCase__ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=A )[0]


@deprecated(A , '''Please use tf.data to implement this functionality.''' )
def _snake_case ( A ) -> Any:
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=A ) as bytestream:
        lowerCAmelCase__ = _readaa(A )
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        lowerCAmelCase__ = _readaa(A )
        lowerCAmelCase__ = _readaa(A )
        lowerCAmelCase__ = _readaa(A )
        lowerCAmelCase__ = bytestream.read(rows * cols * num_images )
        lowerCAmelCase__ = numpy.frombuffer(A , dtype=numpy.uinta )
        lowerCAmelCase__ = data.reshape(A , A , A , 1 )
        return data


@deprecated(A , '''Please use tf.one_hot on tensors.''' )
def _snake_case ( A , A ) -> int:
    lowerCAmelCase__ = labels_dense.shape[0]
    lowerCAmelCase__ = numpy.arange(A ) * num_classes
    lowerCAmelCase__ = numpy.zeros((num_labels, num_classes) )
    lowerCAmelCase__ = 1
    return labels_one_hot


@deprecated(A , '''Please use tf.data to implement this functionality.''' )
def _snake_case ( A , A=False , A=10 ) -> str:
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=A ) as bytestream:
        lowerCAmelCase__ = _readaa(A )
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        lowerCAmelCase__ = _readaa(A )
        lowerCAmelCase__ = bytestream.read(A )
        lowerCAmelCase__ = numpy.frombuffer(A , dtype=numpy.uinta )
        if one_hot:
            return _dense_to_one_hot(A , A )
        return labels


class a__ :
    '''simple docstring'''

    @deprecated(
        lowerCamelCase_ ,
        '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''' ,
    )
    def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=dtypes.floataa , lowerCamelCase_=True , lowerCamelCase_=None , ) -> Optional[Any]:
        lowerCAmelCase__ , lowerCAmelCase__ = random_seed.get_seed(lowerCamelCase_ )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seeda )
        lowerCAmelCase__ = dtypes.as_dtype(lowerCamelCase_ ).base_dtype
        if dtype not in (dtypes.uinta, dtypes.floataa):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
        if fake_data:
            lowerCAmelCase__ = 1_00_00
            lowerCAmelCase__ = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            lowerCAmelCase__ = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                lowerCAmelCase__ = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.floataa:
                # Convert from [0, 255] -> [0.0, 1.0].
                lowerCAmelCase__ = images.astype(numpy.floataa )
                lowerCAmelCase__ = numpy.multiply(lowerCamelCase_ , 1.0 / 255.0 )
        lowerCAmelCase__ = images
        lowerCAmelCase__ = labels
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = 0

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        return self._images

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        return self._labels

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        return self._num_examples

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        return self._epochs_completed

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True ) -> Optional[Any]:
        if fake_data:
            lowerCAmelCase__ = [1] * 7_84
            lowerCAmelCase__ = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(lowerCamelCase_ )],
                [fake_label for _ in range(lowerCamelCase_ )],
            )
        lowerCAmelCase__ = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            lowerCAmelCase__ = numpy.arange(self._num_examples )
            numpy.random.shuffle(lowerCamelCase_ )
            lowerCAmelCase__ = self.images[perma]
            lowerCAmelCase__ = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            lowerCAmelCase__ = self._num_examples - start
            lowerCAmelCase__ = self._images[start : self._num_examples]
            lowerCAmelCase__ = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                lowerCAmelCase__ = numpy.arange(self._num_examples )
                numpy.random.shuffle(lowerCamelCase_ )
                lowerCAmelCase__ = self.images[perm]
                lowerCAmelCase__ = self.labels[perm]
            # Start next epoch
            lowerCAmelCase__ = 0
            lowerCAmelCase__ = batch_size - rest_num_examples
            lowerCAmelCase__ = self._index_in_epoch
            lowerCAmelCase__ = self._images[start:end]
            lowerCAmelCase__ = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            lowerCAmelCase__ = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(A , '''Please write your own downloading logic.''' )
def _snake_case ( A , A , A ) -> Tuple:
    if not gfile.Exists(A ):
        gfile.MakeDirs(A )
    lowerCAmelCase__ = os.path.join(A , A )
    if not gfile.Exists(A ):
        urllib.request.urlretrieve(A , A )  # noqa: S310
        with gfile.GFile(A ) as f:
            lowerCAmelCase__ = f.size()
        print('''Successfully downloaded''' , A , A , '''bytes.''' )
    return filepath


@deprecated(
    A , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def _snake_case ( A , A=False , A=False , A=dtypes.floataa , A=True , A=5000 , A=None , A=DEFAULT_SOURCE_URL , ) -> Dict:
    if fake_data:

        def fake():
            return _DataSet(
                [] , [] , fake_data=A , one_hot=A , dtype=A , seed=A )

        lowerCAmelCase__ = fake()
        lowerCAmelCase__ = fake()
        lowerCAmelCase__ = fake()
        return _Datasets(train=A , validation=A , test=A )

    if not source_url:  # empty string check
        lowerCAmelCase__ = DEFAULT_SOURCE_URL

    lowerCAmelCase__ = '''train-images-idx3-ubyte.gz'''
    lowerCAmelCase__ = '''train-labels-idx1-ubyte.gz'''
    lowerCAmelCase__ = '''t10k-images-idx3-ubyte.gz'''
    lowerCAmelCase__ = '''t10k-labels-idx1-ubyte.gz'''

    lowerCAmelCase__ = _maybe_download(
        A , A , source_url + train_images_file )
    with gfile.Open(A , '''rb''' ) as f:
        lowerCAmelCase__ = _extract_images(A )

    lowerCAmelCase__ = _maybe_download(
        A , A , source_url + train_labels_file )
    with gfile.Open(A , '''rb''' ) as f:
        lowerCAmelCase__ = _extract_labels(A , one_hot=A )

    lowerCAmelCase__ = _maybe_download(
        A , A , source_url + test_images_file )
    with gfile.Open(A , '''rb''' ) as f:
        lowerCAmelCase__ = _extract_images(A )

    lowerCAmelCase__ = _maybe_download(
        A , A , source_url + test_labels_file )
    with gfile.Open(A , '''rb''' ) as f:
        lowerCAmelCase__ = _extract_labels(A , one_hot=A )

    if not 0 <= validation_size <= len(A ):
        lowerCAmelCase__ = (
            '''Validation size should be between 0 and '''
            F"""{len(A )}. Received: {validation_size}."""
        )
        raise ValueError(A )

    lowerCAmelCase__ = train_images[:validation_size]
    lowerCAmelCase__ = train_labels[:validation_size]
    lowerCAmelCase__ = train_images[validation_size:]
    lowerCAmelCase__ = train_labels[validation_size:]

    lowerCAmelCase__ = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}

    lowerCAmelCase__ = _DataSet(A , A , **A )
    lowerCAmelCase__ = _DataSet(A , A , **A )
    lowerCAmelCase__ = _DataSet(A , A , **A )
    return _Datasets(train=A , validation=A , test=A )
'''simple docstring'''

from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

__UpperCAmelCase = TypeVar('''KEY''')
__UpperCAmelCase = TypeVar('''VAL''')


@dataclass(frozen=a__ , slots=a__ )
class a__ ( Generic[KEY, VAL] ):
    '''simple docstring'''

    lowercase__ : KEY
    lowercase__ : VAL


class a__ ( _Item ):
    '''simple docstring'''

    def __init__( self ) -> None:
        super().__init__(lowerCamelCase_ , lowerCamelCase_ )

    def __bool__( self ) -> bool:
        return False


__UpperCAmelCase = _DeletedItem()


class a__ ( MutableMapping[KEY, VAL] ):
    '''simple docstring'''

    def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None:
        lowerCAmelCase__ = initial_block_size
        lowerCAmelCase__ = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        lowerCAmelCase__ = capacity_factor
        lowerCAmelCase__ = 0

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
        return hash(lowerCamelCase_ ) % len(self._buckets )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
        return (ind + 1) % len(self._buckets )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
        lowerCAmelCase__ = self._buckets[ind]
        if not stored:
            lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
            self._len += 1
            return True
        elif stored.key == key:
            lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
            return True
        else:
            return False

    def __SCREAMING_SNAKE_CASE ( self ) -> bool:
        lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(lowerCamelCase_ )

    def __SCREAMING_SNAKE_CASE ( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
        lowerCAmelCase__ = self._buckets
        lowerCAmelCase__ = [None] * new_size
        lowerCAmelCase__ = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def __SCREAMING_SNAKE_CASE ( self ) -> None:
        self._resize(len(self._buckets ) * 2 )

    def __SCREAMING_SNAKE_CASE ( self ) -> None:
        self._resize(len(self._buckets ) // 2 )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]:
        lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ )
        for _ in range(len(self._buckets ) ):
            yield ind
            lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
        for ind in self._iterate_buckets(lowerCamelCase_ ):
            if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
                break

    def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(lowerCamelCase_ , lowerCamelCase_ )

    def __delitem__( self , lowerCamelCase_ ) -> None:
        for ind in self._iterate_buckets(lowerCamelCase_ ):
            lowerCAmelCase__ = self._buckets[ind]
            if item is None:
                raise KeyError(lowerCamelCase_ )
            if item is _deleted:
                continue
            if item.key == key:
                lowerCAmelCase__ = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , lowerCamelCase_ ) -> VAL:
        for ind in self._iterate_buckets(lowerCamelCase_ ):
            lowerCAmelCase__ = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(lowerCamelCase_ )

    def __len__( self ) -> int:
        return self._len

    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ) -> str:
        lowerCAmelCase__ = ''' ,'''.join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item )
        return F"""HashMap({val_string})"""
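A small standalone sketch of the linear-probing order the map above iterates; this is my restatement of `_get_bucket_index`/`_get_next_ind`, not code taken from the sample:

# Hedged sketch: the probe order used by the open-addressing map above.
def probe_sequence(key, num_buckets):
    """Yield bucket indices in linear-probing order, wrapping around once."""
    ind = hash(key) % num_buckets
    for _ in range(num_buckets):
        yield ind
        ind = (ind + 1) % num_buckets


# Every bucket is visited exactly once, so lookups terminate even on a full table:
assert sorted(probe_sequence("key", 8)) == list(range(8))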
'''simple docstring'''


class a__ :
    '''simple docstring'''

    def __init__( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
        lowerCAmelCase__ = name
        lowerCAmelCase__ = val

    def __str__( self ) -> Optional[int]:
        return F"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__( self , lowerCamelCase_ ) -> str:
        return self.val < other.val


class a__ :
    '''simple docstring'''

    def __init__( self , lowerCamelCase_ ) -> Optional[int]:
        lowerCAmelCase__ = {}
        lowerCAmelCase__ = {}
        lowerCAmelCase__ = self.build_heap(lowerCamelCase_ )

    def __getitem__( self , lowerCamelCase_ ) -> Optional[int]:
        return self.get_value(lowerCamelCase_ )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict:
        return (idx - 1) // 2

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict:
        return idx * 2 + 1

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[Any]:
        return idx * 2 + 2

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Any:
        return self.heap_dict[key]

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[int]:
        lowerCAmelCase__ = len(lowerCamelCase_ ) - 1
        lowerCAmelCase__ = self.get_parent_idx(lowerCamelCase_ )
        for idx, i in enumerate(lowerCamelCase_ ):
            lowerCAmelCase__ = idx
            lowerCAmelCase__ = i.val
        for i in range(lowerCamelCase_ , -1 , -1 ):
            self.sift_down(lowerCamelCase_ , lowerCamelCase_ )
        return array

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
        while True:
            lowerCAmelCase__ = self.get_left_child_idx(lowerCamelCase_ )  # noqa: E741
            lowerCAmelCase__ = self.get_right_child_idx(lowerCamelCase_ )
            lowerCAmelCase__ = idx
            if l < len(lowerCamelCase_ ) and array[l] < array[idx]:
                lowerCAmelCase__ = l
            if r < len(lowerCamelCase_ ) and array[r] < array[smallest]:
                lowerCAmelCase__ = r
            if smallest != idx:
                lowerCAmelCase__ , lowerCAmelCase__ = array[smallest], array[idx]
                (
                    (lowerCAmelCase__) ,
                    (lowerCAmelCase__) ,
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                lowerCAmelCase__ = smallest
            else:
                break

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[str]:
        lowerCAmelCase__ = self.get_parent_idx(lowerCamelCase_ )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            lowerCAmelCase__ , lowerCAmelCase__ = self.heap[idx], self.heap[p]
            lowerCAmelCase__ , lowerCAmelCase__ = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            lowerCAmelCase__ = p
            lowerCAmelCase__ = self.get_parent_idx(lowerCamelCase_ )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        return self.heap[0]

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        lowerCAmelCase__ , lowerCAmelCase__ = self.heap[-1], self.heap[0]
        lowerCAmelCase__ , lowerCAmelCase__ = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        lowerCAmelCase__ = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
        self.heap.append(lowerCamelCase_ )
        lowerCAmelCase__ = len(self.heap ) - 1
        lowerCAmelCase__ = node.val
        self.sift_up(len(self.heap ) - 1 )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        return len(self.heap ) == 0

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> str:
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        lowerCAmelCase__ = new_value
        lowerCAmelCase__ = new_value
        self.sift_up(self.idx_of_element[node] )


__UpperCAmelCase = Node('''R''', -1)
__UpperCAmelCase = Node('''B''', 6)
__UpperCAmelCase = Node('''A''', 3)
__UpperCAmelCase = Node('''X''', 1)
__UpperCAmelCase = Node('''E''', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
__UpperCAmelCase = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
    print(i)

print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''

import argparse

import OmegaConf
import torch

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def _snake_case ( A , A , A ) -> Union[str, Any]:
    lowerCAmelCase__ = OmegaConf.load(A )
    lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model''']
    lowerCAmelCase__ = list(state_dict.keys() )

    # extract state_dict for VQVAE
    lowerCAmelCase__ = {}
    lowerCAmelCase__ = '''first_stage_model.'''
    for key in keys:
        if key.startswith(A ):
            lowerCAmelCase__ = state_dict[key]

    # extract state_dict for UNetLDM
    lowerCAmelCase__ = {}
    lowerCAmelCase__ = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(A ):
            lowerCAmelCase__ = state_dict[key]

    lowerCAmelCase__ = config.model.params.first_stage_config.params
    lowerCAmelCase__ = config.model.params.unet_config.params

    lowerCAmelCase__ = VQModel(**A ).eval()
    vqvae.load_state_dict(A )

    lowerCAmelCase__ = UNetLDMModel(**A ).eval()
    unet.load_state_dict(A )

    lowerCAmelCase__ = DDIMScheduler(
        timesteps=config.model.params.timesteps ,
        beta_schedule='''scaled_linear''' ,
        beta_start=config.model.params.linear_start ,
        beta_end=config.model.params.linear_end ,
        clip_sample=A ,
    )

    lowerCAmelCase__ = LDMPipeline(A , A , A )
    pipeline.save_pretrained(A )


if __name__ == "__main__":
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument('''--checkpoint_path''', type=str, required=True)
    parser.add_argument('''--config_path''', type=str, required=True)
    parser.add_argument('''--output_path''', type=str, required=True)
    __UpperCAmelCase = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
'''simple docstring'''

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


__UpperCAmelCase = [
    '''EAGER''',
    '''AOT_EAGER''',
    '''INDUCTOR''',
    '''NVFUSER''',
    '''AOT_NVFUSER''',
    '''AOT_CUDAGRAPHS''',
    '''OFI''',
    '''FX2TRT''',
    '''ONNXRT''',
    '''IPEX''',
]


def _snake_case ( A , A=None , A=None , A=None ) -> Union[str, Any]:
    lowerCAmelCase__ = True
    while ask_again:
        lowerCAmelCase__ = input(A )
        try:
            if default is not None and len(A ) == 0:
                return default
            return convert_value(A ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(A )


def _snake_case ( A , A=[] , A=None , A=0 ) -> List[Any]:
    lowerCAmelCase__ = BulletMenu(A , A )
    lowerCAmelCase__ = menu.run(default_choice=A )
    return convert_value(A ) if convert_value is not None else result


def _snake_case ( A ) -> Tuple:
    lowerCAmelCase__ = int(A )
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )


def _snake_case ( A ) -> Union[str, Any]:
    lowerCAmelCase__ = int(A )
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )


def _snake_case ( A ) -> str:
    lowerCAmelCase__ = int(A )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value


def _snake_case ( A ) -> Tuple:
    lowerCAmelCase__ = int(A )
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )


def _snake_case ( A ) -> Union[str, Any]:
    lowerCAmelCase__ = int(A )
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )


def _snake_case ( A ) -> List[str]:
    return {"yes": True, "no": False}[value.lower()]


class a__ ( argparse.RawDescriptionHelpFormatter ):
    '''simple docstring'''

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
        lowerCAmelCase__ = super()._format_usage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        lowerCAmelCase__ = usage.replace('''<command> [<args>] ''' , '''''' )
        return usage
'''simple docstring'''

import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


__UpperCAmelCase = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
    '''simple docstring'''

    lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
    lowercase__ : bool = field(
        default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    lowercase__ : Optional[int] = field(
        default=a__ ,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } ,
    )
    lowercase__ : Optional[int] = field(
        default=a__ ,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } ,
    )
    lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
        default=a__ ,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } ,
    )

    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        lowerCAmelCase__ = super().to_dict()
        for k, v in d.items():
            if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
                lowerCAmelCase__ = v.to_dict()
        return d
'''simple docstring'''

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__UpperCAmelCase = {
    '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
    '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
    '''processing_mctct''': ['''MCTCTProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MCTCTForCTC''',
        '''MCTCTModel''',
        '''MCTCTPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''

import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


__UpperCAmelCase = False


class a__ ( unittest.TestCase ):
    '''simple docstring'''

    pass


@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
    '''simple docstring'''

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        lowerCAmelCase__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = pipe.dual_guided(
            prompt='''first prompt''' ,
            image=lowerCamelCase_ ,
            text_to_image_strength=0.75 ,
            generator=lowerCamelCase_ ,
            guidance_scale=7.5 ,
            num_inference_steps=2 ,
            output_type='''numpy''' ,
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(lowerCamelCase_ )
            lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        lowerCAmelCase__ = generator.manual_seed(0 )
        lowerCAmelCase__ = pipe.dual_guided(
            prompt='''first prompt''' ,
            image=lowerCamelCase_ ,
            text_to_image_strength=0.75 ,
            generator=lowerCamelCase_ ,
            guidance_scale=7.5 ,
            num_inference_steps=2 ,
            output_type='''numpy''' ,
        ).images

        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )

        lowerCAmelCase__ = '''cyberpunk 2077'''
        lowerCAmelCase__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = pipe.dual_guided(
            prompt=lowerCamelCase_ ,
            image=lowerCamelCase_ ,
            text_to_image_strength=0.75 ,
            generator=lowerCamelCase_ ,
            guidance_scale=7.5 ,
            num_inference_steps=50 ,
            output_type='''numpy''' ,
        ).images

        lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

        lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = pipe.text_to_image(
            prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images

        lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1

        lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images

        lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
'''simple docstring'''

from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def _snake_case ( A ) -> Dict[str, torch.Tensor]:
    lowerCAmelCase__ = []
    lowerCAmelCase__ = []
    lowerCAmelCase__ = []

    for rt in rc.restypes:
        lowerCAmelCase__ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        lowerCAmelCase__ = {name: i for i, name in enumerate(A )}
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14 )
    restype_atomaa_to_atomaa_list.append([0] * 37 )
    restype_atomaa_mask_list.append([0.0] * 14 )

    lowerCAmelCase__ = torch.tensor(
        A ,
        dtype=torch.intaa ,
        device=protein['''aatype'''].device ,
    )
    lowerCAmelCase__ = torch.tensor(
        A ,
        dtype=torch.intaa ,
        device=protein['''aatype'''].device ,
    )
    lowerCAmelCase__ = torch.tensor(
        A ,
        dtype=torch.floataa ,
        device=protein['''aatype'''].device ,
    )
    lowerCAmelCase__ = protein['''aatype'''].to(torch.long )

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    lowerCAmelCase__ = restype_atomaa_to_atomaa[protein_aatype]
    lowerCAmelCase__ = restype_atomaa_mask[protein_aatype]

    lowerCAmelCase__ = residx_atomaa_mask
    lowerCAmelCase__ = residx_atomaa_to_atomaa.long()

    # create the gather indices for mapping back
    lowerCAmelCase__ = restype_atomaa_to_atomaa[protein_aatype]
    lowerCAmelCase__ = residx_atomaa_to_atomaa.long()

    # create the corresponding mask
    lowerCAmelCase__ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        lowerCAmelCase__ = rc.restype_atoa[restype_letter]
        lowerCAmelCase__ = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            lowerCAmelCase__ = rc.atom_order[atom_name]
            lowerCAmelCase__ = 1

    lowerCAmelCase__ = restype_atomaa_mask[protein_aatype]
    lowerCAmelCase__ = residx_atomaa_mask

    return protein


def _snake_case ( A ) -> Dict[str, np.ndarray]:
    lowerCAmelCase__ = tree_map(lambda A : torch.tensor(A , device=batch['''aatype'''].device ) , A , np.ndarray )
    lowerCAmelCase__ = tensor_tree_map(lambda A : np.array(A ) , make_atomaa_masks(A ) )
    return out
'''simple docstring'''

from __future__ import annotations


def _snake_case ( A ) -> bool:
    lowerCAmelCase__ = str(A )
    return len(A ) == 9 and set(A ) == set('''123456789''' )


def _snake_case ( ) -> int | None:
    for base_num in range(9999 , 4999 , -1 ):
        lowerCAmelCase__ = 100002 * base_num
        if is_9_pandigital(A ):
            return candidate

    for base_num in range(333 , 99 , -1 ):
        lowerCAmelCase__ = 1002003 * base_num
        if is_9_pandigital(A ):
            return candidate

    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
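A hedged note on the two magic multipliers above (my reading of the Project Euler 38 construction, not stated in the sample): they encode string concatenation of a number with its multiples as plain arithmetic.

# For a 4-digit n, concatenating n and 2n gives n * 10**5 + 2 * n == 100002 * n;
# for a 3-digit n, concatenating n, 2n and 3n gives
# n * 10**6 + 2 * n * 10**3 + 3 * n == 1002003 * n.
assert int(str(9327) + str(2 * 9327)) == 100002 * 9327
assert int(str(192) + str(2 * 192) + str(3 * 192)) == 1002003 * 192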
'''simple docstring'''

import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class a__ ( unittest.TestCase ):
    '''simple docstring'''

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        lowerCAmelCase__ = {
            '''task_specific_params''': {
                '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_28, '''min_length''': 12, '''num_beams''': 4},
                '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_42, '''min_length''': 56, '''num_beams''': 4},
                '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
            }
        }
        lowerCAmelCase__ = {
            '''task_specific_params.summarization.length_penalty''': 1.0,
            '''task_specific_params.summarization.max_length''': 1_28,
            '''task_specific_params.summarization.min_length''': 12,
            '''task_specific_params.summarization.num_beams''': 4,
            '''task_specific_params.summarization_cnn.length_penalty''': 2.0,
            '''task_specific_params.summarization_cnn.max_length''': 1_42,
            '''task_specific_params.summarization_cnn.min_length''': 56,
            '''task_specific_params.summarization_cnn.num_beams''': 4,
            '''task_specific_params.summarization_xsum.length_penalty''': 1.0,
            '''task_specific_params.summarization_xsum.max_length''': 62,
            '''task_specific_params.summarization_xsum.min_length''': 11,
            '''task_specific_params.summarization_xsum.num_beams''': 6,
        }

        self.assertEqual(flatten_dict(lowerCamelCase_ ) , lowerCamelCase_ )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(lowerCamelCase_ ) , x.transpose() ) )

        lowerCAmelCase__ = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )

    @require_torch
    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
        self.assertTrue(np.allclose(transpose(lowerCamelCase_ ) , transpose(lowerCamelCase_ ).numpy() ) )

        lowerCAmelCase__ = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
        self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0) ) , transpose(lowerCamelCase_ , axes=(1, 2, 0) ).numpy() ) )

    @require_tf
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = tf.constant(lowerCamelCase_ )
        self.assertTrue(np.allclose(transpose(lowerCamelCase_ ) , transpose(lowerCamelCase_ ).numpy() ) )

        lowerCAmelCase__ = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ = tf.constant(lowerCamelCase_ )
        self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0) ) , transpose(lowerCamelCase_ , axes=(1, 2, 0) ).numpy() ) )

    @require_flax
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = jnp.array(lowerCamelCase_ )
        self.assertTrue(np.allclose(transpose(lowerCamelCase_ ) , np.asarray(transpose(lowerCamelCase_ ) ) ) )

        lowerCAmelCase__ = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ = jnp.array(lowerCamelCase_ )
        self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCamelCase_ , axes=(1, 2, 0) ) ) ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3) ) , np.reshape(lowerCamelCase_ , (4, 3) ) ) )

        lowerCAmelCase__ = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (12, 5) ) , np.reshape(lowerCamelCase_ , (12, 5) ) ) )

    @require_torch
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
        self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3) ) , reshape(lowerCamelCase_ , (4, 3) ).numpy() ) )

        lowerCAmelCase__ = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
        self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (12, 5) ) , reshape(lowerCamelCase_ , (12, 5) ).numpy() ) )

    @require_tf
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = tf.constant(lowerCamelCase_ )
        self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3) ) , reshape(lowerCamelCase_ , (4, 3) ).numpy() ) )

        lowerCAmelCase__ = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ = tf.constant(lowerCamelCase_ )
        self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (12, 5) ) , reshape(lowerCamelCase_ , (12, 5) ).numpy() ) )

    @require_flax
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = jnp.array(lowerCamelCase_ )
        self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3) ) , np.asarray(reshape(lowerCamelCase_ , (4, 3) ) ) ) )

        lowerCAmelCase__ = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ = jnp.array(lowerCamelCase_ )
        self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (12, 5) ) , np.asarray(reshape(lowerCamelCase_ , (12, 5) ) ) ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        lowerCAmelCase__ = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ) , np.squeeze(lowerCamelCase_ ) ) )

        lowerCAmelCase__ = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2 ) , np.squeeze(lowerCamelCase_ , axis=2 ) ) )

    @require_torch
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        lowerCAmelCase__ = np.random.randn(1 , 3 , 4 )
        lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
        self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ) , squeeze(lowerCamelCase_ ).numpy() ) )

        lowerCAmelCase__ = np.random.randn(1 , 4 , 1 , 5 )
        lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
        self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2 ) , squeeze(lowerCamelCase_ , axis=2 ).numpy() ) )

    @require_tf
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        lowerCAmelCase__ = np.random.randn(1 , 3 , 4 )
        lowerCAmelCase__ = tf.constant(lowerCamelCase_ )
        self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ) , squeeze(lowerCamelCase_ ).numpy() ) )

        lowerCAmelCase__ = np.random.randn(1 , 4 , 1 , 5 )
        lowerCAmelCase__ = tf.constant(lowerCamelCase_ )
        self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2 ) , squeeze(lowerCamelCase_ , axis=2 ).numpy() ) )

    @require_flax
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        lowerCAmelCase__ = np.random.randn(1 , 3 , 4 )
        lowerCAmelCase__ = jnp.array(lowerCamelCase_ )
        self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ) , np.asarray(squeeze(lowerCamelCase_ ) ) ) )

        lowerCAmelCase__ = np.random.randn(1 , 4 , 1 , 5 )
        lowerCAmelCase__ = jnp.array(lowerCamelCase_ )
        self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2 ) , np.asarray(squeeze(lowerCamelCase_ , axis=2 ) ) ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1 ) , np.expand_dims(lowerCamelCase_ , axis=1 ) ) )

    @require_torch
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = torch.tensor(lowerCamelCase_ )
        self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1 ) , expand_dims(lowerCamelCase_ , axis=1 ).numpy() ) )

    @require_tf
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = tf.constant(lowerCamelCase_ )
        self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1 ) , expand_dims(lowerCamelCase_ , axis=1 ).numpy() ) )

    @require_flax
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        lowerCAmelCase__ = np.random.randn(3 , 4 )
        lowerCAmelCase__ = jnp.array(lowerCamelCase_ )
        self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1 ) , np.asarray(expand_dims(lowerCamelCase_ , axis=1 ) ) ) )
'''simple docstring'''

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


__UpperCAmelCase = '''tiny-wmt19-en-ru'''

# Build

# borrowed from a test
__UpperCAmelCase = [
    '''l''',
    '''o''',
    '''w''',
    '''e''',
    '''r''',
    '''s''',
    '''t''',
    '''i''',
    '''d''',
    '''n''',
    '''w</w>''',
    '''r</w>''',
    '''t</w>''',
    '''lo''',
    '''low''',
    '''er</w>''',
    '''low</w>''',
    '''lowest</w>''',
    '''newer</w>''',
    '''wider</w>''',
    '''<unk>''',
]

__UpperCAmelCase = dict(zip(vocab, range(len(vocab))))
__UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

with tempfile.TemporaryDirectory() as tmpdirname:
    __UpperCAmelCase = Path(tmpdirname)
    __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))

    __UpperCAmelCase = FSMTTokenizer(
        langs=['''en''', '''ru'''],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

__UpperCAmelCase = FSMTConfig(
    langs=['''ru''', '''en'''],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

__UpperCAmelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")

# Test
__UpperCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__UpperCAmelCase = tiny_model(**batch)

print('''test output:''', len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"""Generated {mname_tiny}""")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
'''simple docstring'''

import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    '''vocab_file''': '''vocab.json''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
    '''merges_file''': '''merges.txt''',
}
__UpperCAmelCase = {
    '''vocab_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
        ),
    },
    '''tokenizer_config_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
        ),
    },
    '''merges_file''': {
        '''facebook/s2t-wav2vec2-large-en-de''': (
            '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
        ),
    },
}

__UpperCAmelCase = '''</w>'''
__UpperCAmelCase = '''@@ '''


def _snake_case ( A ) -> Dict:
    lowerCAmelCase__ = set()
    lowerCAmelCase__ = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowerCAmelCase__ = char
    return pairs


# Speech2Text2 has no max input length
__UpperCAmelCase = {'''facebook/s2t-wav2vec2-large-en-de''': 1_024}


class a__ ( a__ ):
    '''simple docstring'''

    lowercase__ : List[Any] = VOCAB_FILES_NAMES
    lowercase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase__ : str = ["input_ids", "attention_mask"]

    def __init__( self , lowerCamelCase_ , lowerCamelCase_="<s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_=False , lowerCamelCase_=None , **lowerCamelCase_ , ) -> int:
        super().__init__(
            unk_token=lowerCamelCase_ ,
            bos_token=lowerCamelCase_ ,
            eos_token=lowerCamelCase_ ,
            pad_token=lowerCamelCase_ ,
            do_lower_case=lowerCamelCase_ ,
            **lowerCamelCase_ ,
        )

        lowerCAmelCase__ = do_lower_case

        with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
            lowerCAmelCase__ = json.load(lowerCamelCase_ )
        lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            lowerCAmelCase__ = None
            lowerCAmelCase__ = None
        else:
            with open(lowerCamelCase_ , encoding='''utf-8''' ) as merges_handle:
                lowerCAmelCase__ = merges_handle.read().split('''\n''' )[:-1]
            lowerCAmelCase__ = [tuple(merge.split()[:2] ) for merge in merges]
            lowerCAmelCase__ = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
            lowerCAmelCase__ = {}

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> int:
        return len(self.decoder )

    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
        lowerCAmelCase__ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase__ = get_pairs(lowerCamelCase_ )

        if not pairs:
            return token

        while True:
            lowerCAmelCase__ = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase__ , lowerCAmelCase__ = bigram
            lowerCAmelCase__ = []
            lowerCAmelCase__ = 0
            while i < len(lowerCamelCase_ ):
                try:
                    lowerCAmelCase__ = word.index(lowerCamelCase_ , lowerCamelCase_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase__ = j

                if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase__ = tuple(lowerCamelCase_ )
            lowerCAmelCase__ = new_word
            if len(lowerCamelCase_ ) == 1:
                break
            else:
                lowerCAmelCase__ = get_pairs(lowerCamelCase_ )
        lowerCAmelCase__ = ''' '''.join(lowerCamelCase_ )
        if word == "\n " + BPE_TOKEN_MERGES:
            lowerCAmelCase__ = '''\n''' + BPE_TOKEN_MERGES

        if word.endswith(lowerCamelCase_ ):
            lowerCAmelCase__ = word.replace(lowerCamelCase_ , '''''' )

        lowerCAmelCase__ = word.replace(''' ''' , lowerCamelCase_ )
        lowerCAmelCase__ = word
        return word

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.'''
            )

        if self.do_lower_case:
            lowerCAmelCase__ = text.lower()

        lowerCAmelCase__ = text.split()

        lowerCAmelCase__ = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(''' ''' ) ) )

        return split_tokens

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
        return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
        lowerCAmelCase__ = self.decoder.get(lowerCamelCase_ , self.unk_token )
        return result

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
        lowerCAmelCase__ = ''' '''.join(lowerCamelCase_ )

        # make sure @@ tokens are concatenated
        lowerCAmelCase__ = ''''''.join(string.split(lowerCamelCase_ ) )

        return string

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]:
        if not os.path.isdir(lowerCamelCase_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase__ = os.path.join(
            lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase__ = os.path.join(
            lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )

        with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )

        lowerCAmelCase__ = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!'''
                    )
                    lowerCAmelCase__ = token_index
                writer.write(''' '''.join(lowerCamelCase_ ) + '''\n''' )
                index += 1

        return (vocab_file, merges_file)
'''simple docstring'''

import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def _snake_case ( ) -> Union[str, Any]:
    raise RuntimeError('''CUDA out of memory.''' )


class a__ ( nn.Module ):
    '''simple docstring'''

    def __init__( self ) -> int:
        super().__init__()
        lowerCAmelCase__ = nn.Linear(3 , 4 )
        lowerCAmelCase__ = nn.BatchNormad(4 )
        lowerCAmelCase__ = nn.Linear(4 , 5 )

    def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[Any]:
        return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_ ) ) )


class a__ ( unittest.TestCase ):
    '''simple docstring'''

    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        lowerCAmelCase__ = []

        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(lowerCamelCase_ ):
            nonlocal batch_sizes
            batch_sizes.append(lowerCamelCase_ )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        lowerCAmelCase__ = []

        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ ):
            nonlocal batch_sizes
            batch_sizes.append(lowerCamelCase_ )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        lowerCAmelCase__ , lowerCAmelCase__ = mock_training_loop_function('''hello''' )
        self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, '''hello'''] )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(lowerCamelCase_ ):
            pass

        with self.assertRaises(lowerCamelCase_ ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )

    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(lowerCamelCase_ ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(lowerCamelCase_ ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(lowerCamelCase_ ) as cm:
            mock_training_loop_function(1_28 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(lowerCamelCase_ ):
            raise ValueError('''Oops, we had an error!''' )

        with self.assertRaises(lowerCamelCase_ ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )

    @require_cuda
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        lowerCAmelCase__ = torch.cuda.memory_allocated()
        lowerCAmelCase__ = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_ )
        lowerCAmelCase__ = release_memory(lowerCamelCase_ )
        self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_ )
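The tests above pin down the contract of `find_executable_batch_size`; a simplified stand-in that is consistent with that contract (not accelerate's actual implementation) looks like this:

# Hedged sketch: retry the wrapped function, halving `batch_size` on a CUDA
# OOM error until it fits, raising once the batch size reaches zero.
def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(function):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    # batch size is injected as the first positional argument
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "CUDA out of memory." in str(e):
                        batch_size //= 2  # halve and retry: 128 -> 64 -> 32 -> ...
                    else:
                        raise
        return wrapper
    return decorator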
'''simple docstring'''

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}


class a__ ( a__ ):
    '''simple docstring'''

    lowercase__ : Union[str, Any] = "mvp"
    lowercase__ : Union[str, Any] = ["past_key_values"]
    lowercase__ : Optional[Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self ,
        lowerCamelCase_=5_02_67 ,
        lowerCamelCase_=10_24 ,
        lowerCamelCase_=12 ,
        lowerCamelCase_=40_96 ,
        lowerCamelCase_=16 ,
        lowerCamelCase_=12 ,
        lowerCamelCase_=40_96 ,
        lowerCamelCase_=16 ,
        lowerCamelCase_=0.0 ,
        lowerCamelCase_=0.0 ,
        lowerCamelCase_="gelu" ,
        lowerCamelCase_=10_24 ,
        lowerCamelCase_=0.1 ,
        lowerCamelCase_=0.0 ,
        lowerCamelCase_=0.0 ,
        lowerCamelCase_=0.02 ,
        lowerCamelCase_=0.0 ,
        lowerCamelCase_=False ,
        lowerCamelCase_=True ,
        lowerCamelCase_=1 ,
        lowerCamelCase_=0 ,
        lowerCamelCase_=2 ,
        lowerCamelCase_=True ,
        lowerCamelCase_=2 ,
        lowerCamelCase_=2 ,
        lowerCamelCase_=False ,
        lowerCamelCase_=1_00 ,
        lowerCamelCase_=8_00 ,
        **lowerCamelCase_ ,
    ) -> Dict:
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = d_model
        lowerCAmelCase__ = encoder_ffn_dim
        lowerCAmelCase__ = encoder_layers
        lowerCAmelCase__ = encoder_attention_heads
        lowerCAmelCase__ = decoder_ffn_dim
        lowerCAmelCase__ = decoder_layers
        lowerCAmelCase__ = decoder_attention_heads
        lowerCAmelCase__ = dropout
        lowerCAmelCase__ = attention_dropout
        lowerCAmelCase__ = activation_dropout
        lowerCAmelCase__ = activation_function
        lowerCAmelCase__ = init_std
        lowerCAmelCase__ = encoder_layerdrop
        lowerCAmelCase__ = decoder_layerdrop
        lowerCAmelCase__ = classifier_dropout
        lowerCAmelCase__ = use_cache
        lowerCAmelCase__ = encoder_layers
        lowerCAmelCase__ = scale_embedding  # scale factor will be sqrt(d_model) if True
        lowerCAmelCase__ = use_prompt
        lowerCAmelCase__ = prompt_length
        lowerCAmelCase__ = prompt_mid_dim

        super().__init__(
            pad_token_id=lowerCamelCase_ ,
            bos_token_id=lowerCamelCase_ ,
            eos_token_id=lowerCamelCase_ ,
            is_encoder_decoder=lowerCamelCase_ ,
            decoder_start_token_id=lowerCamelCase_ ,
            forced_eos_token_id=lowerCamelCase_ ,
            **lowerCamelCase_ ,
        )

        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , lowerCamelCase_ ):
            lowerCAmelCase__ = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.'''
            )
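A hedged usage sketch for the config above, written against the public transformers names (`MvpConfig`/`MvpModel`) that the obfuscated identifiers appear to stand for:

# Assumes a transformers install with MVP support; the names are the public
# ones this sample appears to correspond to, not taken from the sample itself.
from transformers import MvpConfig, MvpModel

config = MvpConfig(d_model=512, encoder_layers=2, decoder_layers=2)
model = MvpModel(config)  # randomly initialised model with this geometry
print(config.num_attention_heads)  # resolved via the attribute map to encoder_attention_heads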
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __UpperCAmelCase = logging.getLogger(__name__) def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]: lowerCAmelCase__ = bnb_quantization_config.load_in_abit lowerCAmelCase__ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) lowerCAmelCase__ = [] # custom device map if isinstance(A , A ) and len(device_map.keys() ) > 1: lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCAmelCase__ = get_keys_to_not_convert(A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A ) lowerCAmelCase__ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCAmelCase__ = [] lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A ) # compatibility with peft lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = get_parameter_device(A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A ) # convert param to the right dtype lowerCAmelCase__ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) lowerCAmelCase__ = getattr(A , A , A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A ): param.to(A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F"""The model device type is {model_device.type}. 
However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCAmelCase__ = replace_with_bnb_layers( A , A , modules_to_not_convert=A ) lowerCAmelCase__ = get_quantized_model_device_map( A , A , A , max_memory=A , no_split_module_classes=A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCAmelCase__ = True lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A , device_map=A , offload_dir=A ) def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]: if device_map is None: if torch.cuda.is_available(): lowerCAmelCase__ = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(A , A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) lowerCAmelCase__ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCAmelCase__ = {} lowerCAmelCase__ = special_dtypes lowerCAmelCase__ = no_split_module_classes lowerCAmelCase__ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCAmelCase__ = get_balanced_memory( A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , ) lowerCAmelCase__ = max_memory lowerCAmelCase__ = infer_auto_device_map(A , **A ) if isinstance(A , A ): # check if don't have any quantized module on the cpu lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCAmelCase__ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _snake_case ( A , A , A=None , A=None ) -> Any: if modules_to_not_convert is None: lowerCAmelCase__ = [] lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]: lowerCAmelCase__ = False for name, module in model.named_children(): if current_key_name is None: lowerCAmelCase__ = [] current_key_name.append(A ) if isinstance(A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCAmelCase__ = '''.'''.join(A ) lowerCAmelCase__ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCAmelCase__ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) lowerCAmelCase__ = module.weight.data if module.bias is not None: lowerCAmelCase__ = module.bias.data bnb_module.requires_grad_(A ) setattr(A , A , A ) lowerCAmelCase__ = True if len(list(module.children() ) ) > 0: lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) lowerCAmelCase__ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case ( A ) -> Tuple: # Create a copy of the model with init_empty_weights(): lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCAmelCase__ = find_tied_parameters(A ) # For compatibility with Accelerate < 0.18 if isinstance(A , A ): lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCAmelCase__ = sum(A , [] ) lowerCAmelCase__ = len(A ) > 0 # Check if it is a base model lowerCAmelCase__ = False if hasattr(A , '''base_model_prefix''' ): lowerCAmelCase__ = not hasattr(A , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCAmelCase__ = list(model.named_children() ) lowerCAmelCase__ = [list_modules[-1][0]] # add last module together with tied weights lowerCAmelCase__ = set(A ) - set(A ) lowerCAmelCase__ = list(set(A ) ) + list(A ) # remove ".weight" from the keys lowerCAmelCase__ = ['''.weight''', '''.bias'''] lowerCAmelCase__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCAmelCase__ = name.replace(A , '''''' ) filtered_module_names.append(A ) return filtered_module_names def _snake_case ( A ) -> Optional[int]: for m in model.modules(): if isinstance(A , bnb.nn.Linearabit ): return True return False def _snake_case ( A ) -> Union[str, Any]: return next(parameter.parameters() ).device def _snake_case ( A , A , A , A , A , A , A ) -> Any: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(A , A , 0 , dtype=A , value=A ) lowerCAmelCase__ = param_name lowerCAmelCase__ = model if "." in tensor_name: lowerCAmelCase__ = tensor_name.split('''.''' ) for split in splits[:-1]: lowerCAmelCase__ = getattr(A , A ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCAmelCase__ = new_module lowerCAmelCase__ = splits[-1] # offload weights lowerCAmelCase__ = False offload_weight(module._parameters[tensor_name] , A , A , index=A ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , ) else: offload_weight(A , A , A , index=A ) offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A ) set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
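The helpers above back accelerate's public quantization entry point. A minimal usage sketch, assuming the `BnbQuantizationConfig` / `load_and_quantize_model` API exported from `accelerate.utils` (the model id and weights path are placeholders, not values from this file):

import torch
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

# Build an empty (meta-device) model so no fp32 weights are materialized up front.
config = AutoConfig.from_pretrained("bigscience/bloom-560m")
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

# 8-bit config; llm_int8_threshold is the outlier threshold used by the Linear8bitLt swap above.
bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="/path/to/bloom-560m-weights",  # placeholder checkpoint folder
    device_map="auto",
)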
90
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { '''configuration_conditional_detr''': [ '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConditionalDetrConfig''', '''ConditionalDetrOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['''ConditionalDetrFeatureExtractor'''] __UpperCAmelCase = ['''ConditionalDetrImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConditionalDetrForObjectDetection''', '''ConditionalDetrForSegmentation''', '''ConditionalDetrModel''', '''ConditionalDetrPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
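With the `_LazyModule` indirection above, importing the package stays cheap and the torch-backed submodules only load on first attribute access. A small usage sketch (standard transformers classes, shown only to illustrate the laziness):

# Cheap: at import time only the import structure is registered.
from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig()

# The modeling file (and its torch dependencies) is imported here, on first access.
from transformers import ConditionalDetrForObjectDetection

model = ConditionalDetrForObjectDetection(config)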
90
'''simple docstring''' from collections.abc import Callable import numpy as np def _snake_case ( A , A , A , A , A ) -> np.array: lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) ) lowerCAmelCase__ = np.zeros((n + 1,) ) lowerCAmelCase__ = ya lowerCAmelCase__ = xa for k in range(A ): lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] ) lowerCAmelCase__ = y[k] + ( (step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
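The integrator above is Heun's method: an explicit Euler predictor followed by a trapezoidal corrector. Since the obfuscated signature repeats the parameter name `A` five times and would not parse as written, here is a self-contained sketch with the argument order assumed from how each value is used in the body:

import numpy as np

def heun(ode_func, y0, x0, x_end, step_size):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0] = y0
    x = x0
    for k in range(n):
        predictor = y[k] + step_size * ode_func(x, y[k])  # explicit Euler step
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, predictor)
        )  # trapezoidal corrector
        x += step_size
    return y

print(heun(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)[-1])  # ~ 2.71828, exact solution is e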
90
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class a__ ( a__ ): '''simple docstring''' lowercase__ : List[str] = "vivit" def __init__( self , lowerCamelCase_=2_24 , lowerCamelCase_=32 , lowerCamelCase_=[2, 16, 16] , lowerCamelCase_=3 , lowerCamelCase_=7_68 , lowerCamelCase_=12 , lowerCamelCase_=12 , lowerCamelCase_=30_72 , lowerCamelCase_="gelu_fast" , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-06 , lowerCamelCase_=True , **lowerCamelCase_ , ) -> Any: lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = image_size lowerCAmelCase__ = num_frames lowerCAmelCase__ = tubelet_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = qkv_bias super().__init__(**lowerCamelCase_ )
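The class above mirrors transformers' `VivitConfig` (its obfuscated `__init__`, with every keyword named `lowerCamelCase_`, would not parse as written). An instantiation sketch using the upstream class directly:

from transformers import VivitConfig

config = VivitConfig()  # defaults: 224px frames, 32 frames per clip, 2x16x16 tubelets
print(config.image_size, config.num_frames, config.tubelet_size)

# Hypothetical smaller variant for quick experiments:
tiny = VivitConfig(num_frames=16, hidden_size=384, num_hidden_layers=6, num_attention_heads=6)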
90
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict: lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Any: return self.length def __getitem__( self , lowerCamelCase_ ) -> List[str]: return {"x": self.x[i], "y": self.y[i]} class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _snake_case ( A , A = 16 ) -> Any: from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} lowerCAmelCase__ = load_dataset('''csv''' , data_files=A ) lowerCAmelCase__ = datasets['''train'''].unique('''label''' ) lowerCAmelCase__ = {v: i for i, v in enumerate(A )} def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowerCAmelCase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 ) return train_dataloader, eval_dataloader
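The TPU branch of the collate function above is the interesting part: XLA wants static shapes, so padding every batch to a fixed length avoids recompilation, while GPU/CPU runs can pad dynamically to the longest sequence. A hedged standalone sketch of that logic (the obfuscated version loses the real parameter names, so these are assumed):

from accelerate.utils.dataclasses import DistributedType

def make_collate_fn(tokenizer, accelerator):
    def collate_fn(examples):
        if accelerator.distributed_type == DistributedType.TPU:
            # Static shapes for XLA: always pad to 128 tokens.
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        # Dynamic padding elsewhere: pad only to the longest example in the batch.
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    return collate_fn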
90
1
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) __UpperCAmelCase = logging.getLogger() def _snake_case ( ) -> int: lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowerCAmelCase__ = parser.parse_args() return args.f def _snake_case ( A ) -> Tuple: lowerCAmelCase__ = {} lowerCAmelCase__ = os.path.join(A , '''all_results.json''' ) if os.path.exists(A ): with open(A , '''r''' ) as f: lowerCAmelCase__ = json.load(A ) else: raise ValueError(F"""can't find {path}""" ) return results def _snake_case ( ) -> Any: lowerCAmelCase__ = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() __UpperCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class a__ ( a__ ): '''simple docstring''' @classmethod def __SCREAMING_SNAKE_CASE ( cls ) -> Any: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCAmelCase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def __SCREAMING_SNAKE_CASE ( cls ) -> Tuple: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) self.assertLess(result['''perplexity'''] , 1_00 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu lowerCAmelCase__ = 7 if get_gpu_count() > 1 else 2 lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''translation_no_trainer''' ) ) ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCamelCase_ ) lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = self.get_auto_remove_tmp_dir() lowerCAmelCase__ = F""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowerCAmelCase__ = get_results(lowerCamelCase_ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''image_classification_no_trainer''' ) ) )
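Each test above shells out through `accelerate launch`. A manual equivalent of the GLUE case, outside the test harness (config file, paths, and output dir are placeholders):

import subprocess

subprocess.run(
    [
        "accelerate", "launch", "--config_file", "default_config.yml",
        "examples/pytorch/text-classification/run_glue_no_trainer.py",
        "--model_name_or_path", "distilbert-base-uncased",
        "--train_file", "tests/fixtures/tests_samples/MRPC/train.csv",
        "--validation_file", "tests/fixtures/tests_samples/MRPC/dev.csv",
        "--per_device_train_batch_size", "2",
        "--learning_rate", "1e-4",
        "--num_train_epochs", "1",
        "--output_dir", "/tmp/glue_no_trainer",
    ],
    check=True,  # raise if the launched script exits non-zero
)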
90
'''simple docstring''' import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __UpperCAmelCase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def _snake_case ( A , A=None ) -> Optional[Any]: require_version(deps[pkg] , A )
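A sketch of what a single runtime check amounts to, using the public `require_version` helper (the pin string here is illustrative, not the exact entry from the dependency table):

from transformers.utils.versions import require_version

# Raises with the given hint if tqdm is missing or older than the pin.
require_version("tqdm>=4.27", "pip install tqdm --upgrade")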
90
1
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = XLNetTokenizer lowercase__ : int = XLNetTokenizerFast lowercase__ : Optional[int] = True lowercase__ : Optional[int] = True def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = '''<s>''' lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<eod>''' ) self.assertEqual(len(lowerCamelCase_ ) , 10_06 ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] ) lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', 
SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' ) lowerCAmelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __SCREAMING_SNAKE_CASE ( self ) -> Any: # fmt: off lowerCAmelCase__ = {'''input_ids''': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase_ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
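The integration checks above boil down to a few observable behaviours of the real tokenizer; an interactive sketch (network access to the pretrained files is assumed):

from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
print(tok.tokenize("This is a test"))  # SentencePiece pieces, '▁'-prefixed
ids = tok.encode("sequence builders")  # encode() appends the special tokens
print(ids[-2:])                        # [4, 3] == <sep>, <cls>, as asserted above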
90
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]: lowerCAmelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), ('''vqa_classifier.0.bias''', 
'''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def _snake_case ( A , A ) -> List[str]: for i in range(config.num_hidden_layers ): lowerCAmelCase__ = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" ) lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase__ = in_proj_bias[: config.hidden_size] lowerCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ = in_proj_bias[-config.hidden_size :] def _snake_case ( A ) -> List[str]: lowerCAmelCase__ = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(A , A ) def _snake_case ( A , A , A ) -> str: lowerCAmelCase__ = dct.pop(A ) lowerCAmelCase__ = val @torch.no_grad() def _snake_case ( A , A ) -> Any: lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False if "vqa" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 3129 lowerCAmelCase__ = '''huggingface/label-files''' lowerCAmelCase__ = '''vqa2-id2label.json''' lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} lowerCAmelCase__ = ViltForQuestionAnswering(A ) elif "nlvr" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 2 lowerCAmelCase__ = {0: '''False''', 1: '''True'''} lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()} lowerCAmelCase__ = 3 lowerCAmelCase__ = ViltForImagesAndTextClassification(A ) elif "irtr" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = ViltForImageAndTextRetrieval(A ) elif "mlm_itm" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = ViltForMaskedLM(A ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict'''] lowerCAmelCase__ = create_rename_keys(A , A , A , A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_q_k_v(A , A ) if mlm_model or irtr_model: lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] for k in ignore_keys: state_dict.pop(A , A ) # load state dict into HuggingFace model model.eval() if mlm_model: lowerCAmelCase__ , lowerCAmelCase__ = 
model.load_state_dict(A , strict=A ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(A ) # Define processor lowerCAmelCase__ = ViltImageProcessor(size=384 ) lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' ) lowerCAmelCase__ = ViltProcessor(A , A ) # Forward pass on example inputs (image + text) if nlvr_model: lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw ) lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw ) lowerCAmelCase__ = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw ) if mlm_model: lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].''' else: lowerCAmelCase__ = '''How many cats are there?''' lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = model(**A ) # Verify outputs if mlm_model: lowerCAmelCase__ = torch.Size([1, 11, 30522] ) lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 ) # verify masked token prediction equals "cats" lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: lowerCAmelCase__ = torch.Size([1, 3129] ) lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 ) # verify vqa prediction equals "2" lowerCAmelCase__ = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: lowerCAmelCase__ = torch.Size([1, 2] ) lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(A ).mkdir(exist_ok=A ) print(F"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) processor.save_pretrained(A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __UpperCAmelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
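A smoke test equivalent to the MLM verification block above, but loading the already-converted hub checkpoint instead of running the converter (the repo id `dandelin/vilt-b32-mlm` is the stock upload, used here as a stand-in):

import requests
import torch
from PIL import Image
from transformers import ViltForMaskedLM, ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, "a bunch of [MASK] laying on a [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# Position 4 is the first [MASK]; the conversion test expects it to decode to "cats".
print(processor.tokenizer.decode([logits[0, 4].argmax(-1).item()]))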
90
1
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict: lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Any: return self.length def __getitem__( self , lowerCamelCase_ ) -> List[str]: return {"x": self.x[i], "y": self.y[i]} class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _snake_case ( A , A = 16 ) -> Any: from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} lowerCAmelCase__ = load_dataset('''csv''' , data_files=A ) lowerCAmelCase__ = datasets['''train'''].unique('''label''' ) lowerCAmelCase__ = {v: i for i, v in enumerate(A )} def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowerCAmelCase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 ) return train_dataloader, eval_dataloader
90
'''simple docstring''' import re def _snake_case ( A ) -> bool: lowerCAmelCase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' ) if match := re.search(lowerCAmelCase__ , A ): return match.string == A return False if __name__ == "__main__": print(_snake_case('''+918827897895'''))
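A few quick checks of the pattern (an optional `+91`/`0`/`91` prefix, then ten digits whose first digit is 7, 8, or 9), calling the validator as fixed above:

print(_snake_case("+918827897895"))   # True: +91 prefix plus ten digits
print(_snake_case("9876543210"))      # True: bare ten-digit number starting with 9
print(_snake_case("+91 1234567890"))  # False: first subscriber digit must be 7/8/9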
90
1
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness __UpperCAmelCase = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' __UpperCAmelCase = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' __UpperCAmelCase = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' __UpperCAmelCase = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' __UpperCAmelCase = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=[1, 10, 1_00] , lowerCamelCase_=4 , lowerCamelCase_=3.0 ) -> Any: if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('''This metric is currently not supported on Windows.''' ) with ThreadPoolExecutor(max_workers=lowerCamelCase_ ) as executor: lowerCAmelCase__ = [] lowerCAmelCase__ = Counter() lowerCAmelCase__ = 0 lowerCAmelCase__ = defaultdict(lowerCamelCase_ ) for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ ) ): for candidate in candidates: lowerCAmelCase__ = candidate + '''\n''' + test_case lowerCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) lowerCAmelCase__ = executor.submit(lowerCamelCase_ , *lowerCamelCase_ ) futures.append(lowerCamelCase_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(lowerCamelCase_ ): lowerCAmelCase__ = future.result() results[result["task_id"]].append((result['''completion_id'''], result) ) lowerCAmelCase__ , lowerCAmelCase__ = [], [] for result in results.values(): result.sort() lowerCAmelCase__ = [r[1]['''passed'''] for r in result] total.append(len(lowerCamelCase_ ) ) correct.append(sum(lowerCamelCase_ ) ) lowerCAmelCase__ = np.array(lowerCamelCase_ ) lowerCAmelCase__ = np.array(lowerCamelCase_ ) lowerCAmelCase__ = k 
lowerCAmelCase__ = {F"""pass@{k}""": estimate_pass_at_k(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( A , A , A ) -> List[str]: def estimator(A , A , A ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(A , A ): lowerCAmelCase__ = itertools.repeat(A , len(A ) ) else: assert len(A ) == len(A ) lowerCAmelCase__ = iter(A ) return np.array([estimator(int(A ) , int(A ) , A ) for n, c in zip(A , A )] )
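The per-k score uses the unbiased pass@k estimator from the Codex paper, 1 - C(n-c, k)/C(n, k), computed as a numerically stable product rather than with factorials. A worked standalone version of the estimator above:

import numpy as np

def pass_at_k(n, c, k):
    """Probability that at least one of k samples passes, given c of n passed."""
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

print(pass_at_k(n=10, c=3, k=1))  # 0.3: chance a single random sample passes
print(pass_at_k(n=10, c=3, k=5))  # ~0.9167 == 1 - C(7,5)/C(10,5)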
90
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''', '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''', }, } __UpperCAmelCase = { '''facebook/esm2_t6_8M_UR50D''': 1_024, '''facebook/esm2_t12_35M_UR50D''': 1_024, } def _snake_case ( A ) -> Optional[Any]: with open(A , '''r''' ) as f: lowerCAmelCase__ = f.read().splitlines() return [l.strip() for l in lines] class a__ ( a__ ): '''simple docstring''' lowercase__ : Optional[Any] = VOCAB_FILES_NAMES lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple: super().__init__(**lowerCamelCase_ ) lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ ) lowerCAmelCase__ = dict(enumerate(self.all_tokens ) ) lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )} lowerCAmelCase__ = unk_token lowerCAmelCase__ = cls_token lowerCAmelCase__ = pad_token lowerCAmelCase__ = mask_token lowerCAmelCase__ = eos_token lowerCAmelCase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]: return text.split() def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict: return len(self._id_to_token ) def __SCREAMING_SNAKE_CASE ( self ) -> int: return {token: i for i, token in enumerate(self.all_tokens )} def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]: lowerCAmelCase__ = [self.cls_token_id] lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 
for token in token_ids_a] lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1] if token_ids_a is not None: mask += [0] * len(lowerCamelCase_ ) + [1] return mask def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]: lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(lowerCamelCase_ , '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return self.get_vocab_size(with_added_tokens=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int: return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ )
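Usage sketch for the character-level vocabulary above: one token per residue, wrapped in `<cls>`/`<eos>` (network access to the pretrained vocab is assumed):

from transformers import EsmTokenizer

tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
enc = tok("MKTAYIAKQR")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# ['<cls>', 'M', 'K', 'T', 'A', 'Y', 'I', 'A', 'K', 'Q', 'R', '<eos>']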
90
1
'''simple docstring''' def _snake_case ( A ) -> str: return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(A )] ) def _snake_case ( A ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(A ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(A ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(A[i] + A[i + 1] , 16 ) for i in range(0 , len(A ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
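Round-trip check of the two helpers (note both carry the obfuscated name `_snake_case`, so the decoder shadows the encoder; the same logic is restated inline here to keep the sketch runnable):

data = b"Hello"
encoded = "".join(hex(b)[2:].zfill(2).upper() for b in data)
print(encoded)  # 48656C6C6F
decoded = bytes(int(encoded[i : i + 2], 16) for i in range(0, len(encoded), 2))
print(decoded)  # b'Hello'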
90
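A quick round-trip check for the Base16 codec above, cross-validated against the standard library's `base64.b16encode`/`b16decode`, which implement the same uppercase RFC 3548 alphabet.

import base64


def b16_encode(data: bytes) -> str:
    # Same per-byte hex formatting as the encoder above.
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)


payload = b"Hello World!"
encoded = b16_encode(payload)
assert encoded == base64.b16encode(payload).decode("ascii")  # '48656C6C6F20576F726C6421'
assert base64.b16decode(encoded) == payload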
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a__ ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = AltDiffusionPipeline lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS def __SCREAMING_SNAKE_CASE ( self ) -> str: torch.manual_seed(0 ) lowerCAmelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowerCAmelCase__ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) lowerCAmelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , ) lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ ) lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCAmelCase__ = 77 lowerCAmelCase__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]: if str(lowerCamelCase_ ).startswith('''mps''' ): lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ ) else: lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) lowerCAmelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: 
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = '''A photo of an astronaut''' lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # make sure here that pndm scheduler skips prk lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 
5_12, 3) lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
90
1
'''simple docstring'''
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    '''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
    ''' StableDiffusionInpaintPipeline` instead.'''
)
90
'''simple docstring'''


def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"""{solution() = }""")
90
1
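The same fold over `lcm` written against the standard library as a sanity check; `math.lcm` requires Python 3.9+. For n = 20 the smallest evenly divisible number is 232792560.

import math
from functools import reduce

# Fold lcm over 1..20, exactly what solution() above does with its own gcd.
smallest_multiple = reduce(math.lcm, range(1, 21), 1)
assert smallest_multiple == 232792560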
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class a__ ( a__ ): '''simple docstring''' lowercase__ : List[str] = "wavlm" def __init__( self , lowerCamelCase_=32 , lowerCamelCase_=7_68 , lowerCamelCase_=12 , lowerCamelCase_=12 , lowerCamelCase_=30_72 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_="group" , lowerCamelCase_="gelu" , lowerCamelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase_=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase_=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase_=False , lowerCamelCase_=1_28 , lowerCamelCase_=16 , lowerCamelCase_=3_20 , lowerCamelCase_=8_00 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=0.05 , lowerCamelCase_=10 , lowerCamelCase_=2 , lowerCamelCase_=0.0 , lowerCamelCase_=10 , lowerCamelCase_=3_20 , lowerCamelCase_=2 , lowerCamelCase_=0.1 , lowerCamelCase_=1_00 , lowerCamelCase_=2_56 , lowerCamelCase_=2_56 , lowerCamelCase_=0.1 , lowerCamelCase_="mean" , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=2_56 , lowerCamelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCamelCase_=(5, 3, 3, 1, 1) , lowerCamelCase_=(1, 2, 3, 1, 1) , lowerCamelCase_=5_12 , lowerCamelCase_=80 , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=2 , lowerCamelCase_=False , lowerCamelCase_=3 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Optional[int]: super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) lowerCAmelCase__ = hidden_size lowerCAmelCase__ = feat_extract_norm lowerCAmelCase__ = feat_extract_activation lowerCAmelCase__ = list(lowerCamelCase_ ) lowerCAmelCase__ = list(lowerCamelCase_ ) lowerCAmelCase__ = list(lowerCamelCase_ ) lowerCAmelCase__ = conv_bias lowerCAmelCase__ = num_buckets lowerCAmelCase__ = max_bucket_distance lowerCAmelCase__ = num_conv_pos_embeddings lowerCAmelCase__ = num_conv_pos_embedding_groups lowerCAmelCase__ = len(self.conv_dim ) lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = hidden_dropout lowerCAmelCase__ = attention_dropout lowerCAmelCase__ = activation_dropout lowerCAmelCase__ = feat_proj_dropout lowerCAmelCase__ = final_dropout lowerCAmelCase__ = layerdrop lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = initializer_range lowerCAmelCase__ = num_ctc_classes lowerCAmelCase__ = vocab_size lowerCAmelCase__ = do_stable_layer_norm lowerCAmelCase__ = use_weighted_layer_sum lowerCAmelCase__ = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ = apply_spec_augment lowerCAmelCase__ = mask_time_prob lowerCAmelCase__ = mask_time_length lowerCAmelCase__ = mask_time_min_masks lowerCAmelCase__ = mask_feature_prob lowerCAmelCase__ = mask_feature_length # parameters for pretraining with codevector quantized representations lowerCAmelCase__ = num_codevectors_per_group lowerCAmelCase__ = num_codevector_groups lowerCAmelCase__ = contrastive_logits_temperature lowerCAmelCase__ = num_negatives lowerCAmelCase__ = codevector_dim lowerCAmelCase__ = proj_codevector_dim lowerCAmelCase__ = diversity_loss_weight # ctc loss lowerCAmelCase__ = ctc_loss_reduction lowerCAmelCase__ = ctc_zero_infinity # adapter lowerCAmelCase__ = add_adapter lowerCAmelCase__ = adapter_kernel_size lowerCAmelCase__ = adapter_stride lowerCAmelCase__ = num_adapter_layers lowerCAmelCase__ = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCAmelCase__ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCAmelCase__ = list(lowerCamelCase_ ) lowerCAmelCase__ = list(lowerCamelCase_ ) lowerCAmelCase__ = list(lowerCamelCase_ ) lowerCAmelCase__ = xvector_output_dim @property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
90
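A short sketch of the stride product the config's closing property computes: `functools.reduce(operator.mul, conv_stride, 1)` gives the number of raw audio samples covered by one encoder frame. With the default strides this is 320 samples, i.e. 20 ms of 16 kHz audio (the sampling rate is an assumption typical of WavLM, not stated in the config itself).

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default from the signature above
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # 320 / 16000 Hz = 20 ms per output frame (assumed rate)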
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu __UpperCAmelCase = [ '''EAGER''', '''AOT_EAGER''', '''INDUCTOR''', '''NVFUSER''', '''AOT_NVFUSER''', '''AOT_CUDAGRAPHS''', '''OFI''', '''FX2TRT''', '''ONNXRT''', '''IPEX''', ] def _snake_case ( A , A=None , A=None , A=None ) -> Union[str, Any]: lowerCAmelCase__ = True while ask_again: lowerCAmelCase__ = input(A ) try: if default is not None and len(A ) == 0: return default return convert_value(A ) if convert_value is not None else result except Exception: if error_message is not None: print(A ) def _snake_case ( A , A=[] , A=None , A=0 ) -> List[Any]: lowerCAmelCase__ = BulletMenu(A , A ) lowerCAmelCase__ = menu.run(default_choice=A ) return convert_value(A ) if convert_value is not None else result def _snake_case ( A ) -> Tuple: lowerCAmelCase__ = int(A ) return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] ) def _snake_case ( A ) -> Union[str, Any]: lowerCAmelCase__ = int(A ) return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] ) def _snake_case ( A ) -> str: lowerCAmelCase__ = int(A ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def _snake_case ( A ) -> Tuple: lowerCAmelCase__ = int(A ) return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] ) def _snake_case ( A ) -> Union[str, Any]: lowerCAmelCase__ = int(A ) return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] ) def _snake_case ( A ) -> List[str]: return {"yes": True, "no": False}[value.lower()] class a__ ( argparse.RawDescriptionHelpFormatter ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = super()._format_usage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = usage.replace('''<command> [<args>] ''' , '''''' ) return usage
90
1
'''simple docstring''' import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', } __UpperCAmelCase = { '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''}, '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''}, } __UpperCAmelCase = { '''ctrl''': 256, } __UpperCAmelCase = { '''Pregnancy''': 168_629, '''Christianity''': 7_675, '''Explain''': 106_423, '''Fitness''': 63_440, '''Saving''': 63_163, '''Ask''': 27_171, '''Ass''': 95_985, '''Joke''': 163_509, '''Questions''': 45_622, '''Thoughts''': 49_605, '''Retail''': 52_342, '''Feminism''': 164_338, '''Writing''': 11_992, '''Atheism''': 192_263, '''Netflix''': 48_616, '''Computing''': 39_639, '''Opinion''': 43_213, '''Alone''': 44_967, '''Funny''': 58_917, '''Gaming''': 40_358, '''Human''': 4_088, '''India''': 1_331, '''Joker''': 77_138, '''Diet''': 36_206, '''Legal''': 11_859, '''Norman''': 4_939, '''Tip''': 72_689, '''Weight''': 52_343, '''Movies''': 46_273, '''Running''': 23_425, '''Science''': 2_090, '''Horror''': 37_793, '''Confession''': 60_572, '''Finance''': 12_250, '''Politics''': 16_360, '''Scary''': 191_985, '''Support''': 12_654, '''Technologies''': 32_516, '''Teenage''': 66_160, '''Event''': 32_769, '''Learned''': 67_460, '''Notion''': 182_770, '''Wikipedia''': 37_583, '''Books''': 6_665, '''Extract''': 76_050, '''Confessions''': 102_701, '''Conspiracy''': 75_932, '''Links''': 63_674, '''Narcissus''': 150_425, '''Relationship''': 54_766, '''Relationships''': 134_796, '''Reviews''': 41_671, '''News''': 4_256, '''Translation''': 26_820, '''multilingual''': 128_406, } def _snake_case ( A ) -> Optional[int]: lowerCAmelCase__ = set() lowerCAmelCase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ = char lowerCAmelCase__ = set(A ) return pairs class a__ ( a__ ): '''simple docstring''' lowercase__ : List[Any] = VOCAB_FILES_NAMES lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP lowercase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : Dict = CONTROL_CODES def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="<unk>" , **lowerCamelCase_ ) -> List[Any]: super().__init__(unk_token=lowerCamelCase_ , **lowerCamelCase_ ) with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle: lowerCAmelCase__ = json.load(lowerCamelCase_ ) lowerCAmelCase__ = {v: k for k, v in self.encoder.items()} with open(lowerCamelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase__ = merges_handle.read().split('''\n''' )[1:-1] lowerCAmelCase__ = [tuple(merge.split() ) for merge in merges] lowerCAmelCase__ = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) lowerCAmelCase__ = {} @property def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: return len(self.encoder ) def __SCREAMING_SNAKE_CASE ( self ) -> int: return dict(self.encoder , **self.added_tokens_encoder ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[Any]: if token in self.cache: return self.cache[token] lowerCAmelCase__ = tuple(lowerCamelCase_ ) lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCAmelCase__ = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: lowerCAmelCase__ = min(lowerCamelCase_ , 
key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ = bigram lowerCAmelCase__ = [] lowerCAmelCase__ = 0 while i < len(lowerCamelCase_ ): try: lowerCAmelCase__ = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ = tuple(lowerCamelCase_ ) lowerCAmelCase__ = new_word if len(lowerCamelCase_ ) == 1: break else: lowerCAmelCase__ = get_pairs(lowerCamelCase_ ) lowerCAmelCase__ = '''@@ '''.join(lowerCamelCase_ ) lowerCAmelCase__ = word[:-4] lowerCAmelCase__ = word return word def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: lowerCAmelCase__ = [] lowerCAmelCase__ = re.findall(r'''\S+\n?''' , lowerCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(''' ''' ) ) ) return split_tokens def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]: return self.decoder.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple: lowerCAmelCase__ = ''' '''.join(lowerCamelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ = os.path.join( lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase__ = os.path.join( lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' ) lowerCAmelCase__ = 0 with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) lowerCAmelCase__ = token_index writer.write(''' '''.join(lowerCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
90
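A toy version of the BPE merge loop at the heart of the tokenizer's `bpe` method: repeatedly merge the lowest-ranked adjacent pair, then join the pieces with the `@@ ` continuation marker and strip the `</w>` end-of-word tag. The two-entry merge table below is hypothetical, not the real CTRL `merges.txt`.

# Hypothetical merge ranks; lower rank = learned earlier = merged first.
bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}


def toy_bpe(token: str) -> str:
    word = list(token[:-1]) + [token[-1] + "</w>"]
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)  # apply the merge
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = new_word
    return "@@ ".join(word)[:-4]  # drop the trailing "</w>"


assert toy_bpe("low") == "low"
assert toy_bpe("lot") == "lo@@ t"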
'''simple docstring'''
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
90
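A quick agreement check between the two evaluators above. Horner's rule needs only n multiplications and n additions for a degree-n polynomial, while the naive sum recomputes `x**i` for every term.

import math

poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # 5x^2 + 9.3x^3 + 7x^4, as in the demo above
x = 10.0

naive = sum(c * x**i for i, c in enumerate(poly))

acc = 0.0
for coeff in reversed(poly):  # Horner: (...((a_n)x + a_{n-1})x + ...)x + a_0
    acc = acc * x + coeff

assert math.isclose(naive, acc) and round(acc) == 79800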
'''simple docstring'''
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo and backtrack
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=''' ''')
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print('''\nExample grid:\n''' + '''=''' * 20)
        print_solution(example_grid)
        print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print('''Cannot find a solution.''')
90
1
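A small demo of the constraint check the solver above relies on: a candidate digit is rejected if it already appears in the same row, column, or 3x3 box. Here, digit 4 fails the row test for cell (0, 1) of the initial grid (only the top three rows are needed for this check).

grid_top = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
]
row, column, digit = 0, 1, 4
row_clash = digit in grid_top[row]                        # 4 already sits at (0, 6)
column_clash = any(r[column] == digit for r in grid_top)  # no 4 in column 1 so far
assert row_clash and not column_clash  # placement rejected by the row rule alone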
'''simple docstring'''


def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f"""{solution() = }""")
90
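A brute-force cross-check of the dynamic programme above for small rows: count fillings of a length-n row with red blocks at least three units long, any two blocks separated by at least one empty cell. Project Euler 114 quotes seventeen arrangements for n = 7, and the memoised recursion below reproduces that.

from functools import lru_cache


@lru_cache(maxsize=None)
def count(n: int) -> int:
    if n <= 0:
        return 1  # nothing (left) to fill: exactly one way
    total = 1  # leave the whole remainder empty
    for start in range(n - 2):  # first block begins at `start`
        for block in range(3, n - start + 1):
            # one mandatory empty separator cell after the block, if anything follows
            total += count(n - start - block - 1)
    return total


assert [count(i) for i in range(3, 8)] == [2, 4, 7, 11, 17]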
'''simple docstring'''


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
90
1
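An iterative variant of the digit-count search above: keep only the last two Fibonacci numbers instead of rebuilding the whole sequence on every probe, turning a quadratic scan into a linear one.

def fibonacci_digits_index_fast(n: int) -> int:
    a, b, index = 1, 1, 2  # F(1) = F(2) = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index


assert fibonacci_digits_index_fast(2) == 7   # F(7) = 13 is the first 2-digit term
assert fibonacci_digits_index_fast(3) == 12  # F(12) = 144 is the first 3-digit term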
'''simple docstring'''


def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError('''Collection must be ascending sorted''')
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print('''Not found''')
90
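One step of the interpolation estimate used above: instead of probing the middle, the index is estimated proportionally to where the target value sits within the current window, which is what makes the search sub-logarithmic on roughly uniform data.

collection = [10, 30, 40, 45, 50, 66, 77, 93]
item, left, right = 66, 0, len(collection) - 1

point = left + ((item - collection[left]) * (right - left)) // (
    collection[right] - collection[left]
)
assert point == 4 and collection[point] == 50  # probe undershoots; window moves right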
'''simple docstring'''
from __future__ import annotations

from random import choice


def random_pivot(lst):
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
90
1
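A usage sketch of the quickselect above: `k` is 1-based, so `k = 1` returns the minimum, with expected linear time versus the O(n log n) of sorting first. Note the partition drops elements equal to the pivot, so this version assumes distinct values.

from random import choice


def kth(lst, k):
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]  # elements equal to the pivot are dropped,
    big = [e for e in lst if e > pivot]    # so distinct inputs only
    if len(small) == k - 1:
        return pivot
    if len(small) < k - 1:
        return kth(big, k - len(small) - 1)
    return kth(small, k)


data = [7, 1, 9, 4, 5]
assert kth(data, 1) == 1 and kth(data, 3) == 5 and kth(data, 5) == 9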
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __UpperCAmelCase = get_tests_dir('''fixtures''') __UpperCAmelCase = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') __UpperCAmelCase = get_tests_dir('''fixtures/dummy-config.json''') class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = 0 def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop('''feature_extractor_type''' ) lowerCAmelCase__ = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved lowerCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: with self.assertRaisesRegex( lowerCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained('''bert-base''' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: with self.assertRaisesRegex( lowerCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision='''aaaaaa''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: with self.assertRaisesRegex( lowerCamelCase_ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase_ ): lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase_ ): lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ ) lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) def __SCREAMING_SNAKE_CASE ( self ) -> int: try: AutoConfig.register('''custom''' , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCAmelCase__ = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __SCREAMING_SNAKE_CASE ( self ) -> str: class a__ ( a__ ): '''simple docstring''' lowercase__ : str = True try: AutoConfig.register('''custom''' , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(not hasattr(lowerCamelCase_ , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
90
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
90
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __UpperCAmelCase = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
90
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __UpperCAmelCase = TypeVar('''KEY''') __UpperCAmelCase = TypeVar('''VAL''') @dataclass(frozen=a__ , slots=a__ ) class a__ ( Generic[KEY, VAL] ): '''simple docstring''' lowercase__ : KEY lowercase__ : VAL class a__ ( _Item ): '''simple docstring''' def __init__( self ) -> None: super().__init__(lowerCamelCase_ , lowerCamelCase_ ) def __bool__( self ) -> bool: return False __UpperCAmelCase = _DeletedItem() class a__ ( MutableMapping[KEY, VAL] ): '''simple docstring''' def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None: lowerCAmelCase__ = initial_block_size lowerCAmelCase__ = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ = capacity_factor lowerCAmelCase__ = 0 def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return hash(lowerCamelCase_ ) % len(self._buckets ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return (ind + 1) % len(self._buckets ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool: lowerCAmelCase__ = self._buckets[ind] if not stored: lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ ) return True else: return False def __SCREAMING_SNAKE_CASE ( self ) -> bool: lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None: lowerCAmelCase__ = self._buckets lowerCAmelCase__ = [None] * new_size lowerCAmelCase__ = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __SCREAMING_SNAKE_CASE ( self ) -> None: self._resize(len(self._buckets ) * 2 ) def __SCREAMING_SNAKE_CASE ( self ) -> None: self._resize(len(self._buckets ) // 2 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]: lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None: for ind in self._iterate_buckets(lowerCamelCase_ ): if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): break def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None: if self._is_full(): self._size_up() self._add_item(lowerCamelCase_ , lowerCamelCase_ ) def __delitem__( self , lowerCamelCase_ ) -> None: for ind in self._iterate_buckets(lowerCamelCase_ ): lowerCAmelCase__ = self._buckets[ind] if item is None: raise KeyError(lowerCamelCase_ ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , lowerCamelCase_ ) -> VAL: for ind in self._iterate_buckets(lowerCamelCase_ ): lowerCAmelCase__ = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCamelCase_ ) def __len__( self ) -> int: return self._len def __iter__( self ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: 
lowerCAmelCase__ = ''' ,'''.join( F"""{item.key}: {item.val}""" for item in self._buckets if item ) return F"""HashMap({val_string})"""
90
1
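A sketch of the linear-probe walk the hash map above performs: start at `hash(key) % table_size` and step one bucket at a time, wrapping around, until an empty or matching bucket turns up; `_iterate_buckets` yields exactly this sequence, capped at one full pass over the table.

def probe_sequence(key, table_size):
    ind = hash(key) % table_size
    for _ in range(table_size):  # at most one full pass over the table
        yield ind
        ind = (ind + 1) % table_size


buckets = list(probe_sequence("some-key", 8))
assert len(buckets) == 8 and sorted(buckets) == list(range(8))  # each slot visited once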
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __UpperCAmelCase = { '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongT5EncoderModel''', '''LongT5ForConditionalGeneration''', '''LongT5Model''', '''LongT5PreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''FlaxLongT5ForConditionalGeneration''', '''FlaxLongT5Model''', '''FlaxLongT5PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
90
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def _snake_case ( A , A , A ) -> Union[str, Any]: lowerCAmelCase__ = OmegaConf.load(A ) lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model'''] lowerCAmelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCAmelCase__ = {} lowerCAmelCase__ = '''first_stage_model.''' for key in keys: if key.startswith(A ): lowerCAmelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCAmelCase__ = {} lowerCAmelCase__ = '''model.diffusion_model.''' for key in keys: if key.startswith(A ): lowerCAmelCase__ = state_dict[key] lowerCAmelCase__ = config.model.params.first_stage_config.params lowerCAmelCase__ = config.model.params.unet_config.params lowerCAmelCase__ = VQModel(**A ).eval() vqvae.load_state_dict(A ) lowerCAmelCase__ = UNetLDMModel(**A ).eval() unet.load_state_dict(A ) lowerCAmelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , ) lowerCAmelCase__ = LDMPipeline(A , A , A ) pipeline.save_pretrained(A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', type=str, required=True) parser.add_argument('''--config_path''', type=str, required=True) parser.add_argument('''--output_path''', type=str, required=True) __UpperCAmelCase = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
90
1
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def _snake_case ( A = "laptop" ) -> DataFrame: lowerCAmelCase__ = F"""https://www.amazon.in/laptop/s?k={product}""" lowerCAmelCase__ = { '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''', '''Accept-Language''': '''en-US, en;q=0.5''', } lowerCAmelCase__ = BeautifulSoup(requests.get(A , headers=A ).text ) # Initialize a Pandas dataframe with the column titles lowerCAmelCase__ = DataFrame( columns=[ '''Product Title''', '''Product Link''', '''Current Price of the product''', '''Product Rating''', '''MRP of the product''', '''Discount''', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ): try: lowerCAmelCase__ = item.ha.text lowerCAmelCase__ = '''https://www.amazon.in/''' + item.ha.a['''href'''] lowerCAmelCase__ = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text try: lowerCAmelCase__ = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text except AttributeError: lowerCAmelCase__ = '''Not available''' try: lowerCAmelCase__ = ( '''₹''' + item.find( '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1] ) except AttributeError: lowerCAmelCase__ = '''''' try: lowerCAmelCase__ = float( ( ( float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) ) ) / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) ) ) * 100 ) except ValueError: lowerCAmelCase__ = float('''nan''' ) except AttributeError: pass lowerCAmelCase__ = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowerCAmelCase__ = ''' ''' lowerCAmelCase__ = ''' ''' data_frame.index += 1 return data_frame if __name__ == "__main__": __UpperCAmelCase = '''headphones''' get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
90
'''simple docstring''' import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __UpperCAmelCase = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class a__ ( a__ ): '''simple docstring''' lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase__ : bool = field( default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase__ : Optional[int] = field( default=a__ , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase__ : Optional[int] = field( default=a__ , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field( default=a__ , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = super().to_dict() for k, v in d.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = v.to_dict() return d
90
1
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = '''ylacombe/bark-small''' lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = '''en_speaker_1''' lowerCAmelCase__ = '''This is a test string''' lowerCAmelCase__ = '''speaker_embeddings_path.json''' lowerCAmelCase__ = '''speaker_embeddings''' def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> int: return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: shutil.rmtree(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BarkProcessor(tokenizer=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowerCAmelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCAmelCase__ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowerCAmelCase__ = 35 lowerCAmelCase__ = 2 lowerCAmelCase__ = 8 lowerCAmelCase__ = { '''semantic_prompt''': np.ones(lowerCamelCase_ ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowerCAmelCase__ = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) lowerCAmelCase__ = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowerCAmelCase__ = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(lowerCamelCase_ , **lowerCamelCase_ ) lowerCAmelCase__ = processor(text=self.input_string , voice_preset=lowerCamelCase_ ) lowerCAmelCase__ = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowerCAmelCase__ = processor(text=self.input_string , voice_preset=self.voice_preset ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BarkProcessor(tokenizer=lowerCamelCase_ ) lowerCAmelCase__ = processor(text=self.input_string ) lowerCAmelCase__ = tokenizer( self.input_string , padding='''max_length''' , max_length=2_56 , 
add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
90
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device __UpperCAmelCase = False class a__ ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = generator.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''cyberpunk 2077''' lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 lowerCAmelCase__ = '''A painting of a squirrel eating a burger ''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.text_to_image( prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images 
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
90
1
'''simple docstring'''


def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    # iteratively apply C(n) = C(n-1) * (4n - 2) / (n + 1)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
90
'''simple docstring'''

from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    # concatenated products of a 4-digit base with (1, 2): candidate = base * 100002
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # concatenated products of a 3-digit base with (1, 2, 3): candidate = base * 1002003
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
90
1
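A minimal sanity check for the `catalan` helper restored above (the function name comes from that restoration, not from the original dump; run this in the same module that defines it). The expected values are the first six Catalan numbers.

# illustrative check: catalan(n) returns the n-th Catalan number, 1-indexed
if __name__ == "__main__":
    expected = [1, 1, 2, 5, 14, 42]
    computed = [catalan(n) for n in range(1, 7)]
    assert computed == expected, f"{computed} != {expected}"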
'''simple docstring'''

# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow start-up logging

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
90
'''simple docstring'''

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
90
1
'''simple docstring'''


def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        # largest numerator with current_numerator / current_denominator < numerator / denominator
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # compare fractions by cross-multiplication to avoid floating point
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
90
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def _snake_case ( ) -> Union[str, Any]: raise RuntimeError('''CUDA out of memory.''' ) class a__ ( nn.Module ): '''simple docstring''' def __init__( self ) -> int: super().__init__() lowerCAmelCase__ = nn.Linear(3 , 4 ) lowerCAmelCase__ = nn.BatchNormad(4 ) lowerCAmelCase__ = nn.Linear(4 , 5 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_ ) ) ) class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(lowerCamelCase_ ): nonlocal batch_sizes batch_sizes.append(lowerCamelCase_ ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ ): nonlocal batch_sizes batch_sizes.append(lowerCamelCase_ ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowerCAmelCase__ , lowerCAmelCase__ = mock_training_loop_function('''hello''' ) self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(lowerCamelCase_ ): pass with self.assertRaises(lowerCamelCase_ ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(lowerCamelCase_ ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(lowerCamelCase_ ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(lowerCamelCase_ ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(lowerCamelCase_ ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(lowerCamelCase_ ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = torch.cuda.memory_allocated() lowerCAmelCase__ = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_ ) lowerCAmelCase__ = release_memory(lowerCamelCase_ ) self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_ )
90
1
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a__ ( a__ ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCamelCase_ , '''width_multiplier''' ) ) class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=64 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_="swish" , lowerCamelCase_=3 , lowerCamelCase_=32 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=10 , lowerCamelCase_=None , lowerCamelCase_=0.25 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , ) -> Tuple: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = make_divisible(5_12 * width_multiplier , divisor=8 ) lowerCAmelCase__ = hidden_act lowerCAmelCase__ = conv_kernel_size lowerCAmelCase__ = output_stride lowerCAmelCase__ = classifier_dropout_prob lowerCAmelCase__ = use_labels lowerCAmelCase__ = is_training lowerCAmelCase__ = num_labels lowerCAmelCase__ = initializer_range lowerCAmelCase__ = scope lowerCAmelCase__ = width_multiplier lowerCAmelCase__ = ffn_dropout lowerCAmelCase__ = attn_dropout def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels, pixel_labels def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any: lowerCAmelCase__ = MobileViTVaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def 
__SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int: lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = MobileViTVaForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]: lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = MobileViTVaForSemanticSegmentation(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class a__ ( a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Tuple = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowercase__ : int = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ : Dict = False lowercase__ : Optional[int] = False lowercase__ : int = False lowercase__ : List[str] = False def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = MobileViTVaModelTester(self ) lowerCAmelCase__ = MobileViTVaConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: pass @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: pass @unittest.skip(reason='''MobileViTV2 does not output attentions''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: pass @require_torch_multi_gpu @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: pass def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(lowerCamelCase_ ) lowerCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] lowerCAmelCase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: 
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: def check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): lowerCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) lowerCAmelCase__ = outputs.hidden_states lowerCAmelCase__ = 5 self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowerCAmelCase__ = 2 for i in range(len(lowerCamelCase_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase_ ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Dict: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = MobileViTVaModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def _snake_case ( ) -> int: lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return ( MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ) if is_vision_available() else None ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to( lowerCamelCase_ ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase__ = model(**lowerCamelCase_ ) # verify the logits lowerCAmelCase__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) lowerCAmelCase__ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowerCAmelCase__ = model.to(lowerCamelCase_ ) lowerCAmelCase__ = 
MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase__ = model(**lowerCamelCase_ ) lowerCAmelCase__ = outputs.logits # verify the logits lowerCAmelCase__ = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , lowerCamelCase_ ) lowerCAmelCase__ = torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=lowerCamelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase_ , atol=1e-4 ) ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowerCAmelCase__ = model.to(lowerCamelCase_ ) lowerCAmelCase__ = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase__ = model(**lowerCamelCase_ ) lowerCAmelCase__ = outputs.logits.detach().cpu() lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ , target_sizes=[(50, 60)] ) lowerCAmelCase__ = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , lowerCamelCase_ ) lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ ) lowerCAmelCase__ = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
90
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __UpperCAmelCase = logging.getLogger(__name__) def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]: lowerCAmelCase__ = bnb_quantization_config.load_in_abit lowerCAmelCase__ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) lowerCAmelCase__ = [] # custom device map if isinstance(A , A ) and len(device_map.keys() ) > 1: lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCAmelCase__ = get_keys_to_not_convert(A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A ) lowerCAmelCase__ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCAmelCase__ = [] lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A ) # compatibility with peft lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = get_parameter_device(A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A ) # convert param to the right dtype lowerCAmelCase__ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) lowerCAmelCase__ = getattr(A , A , A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A ): param.to(A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F"""The model device type is {model_device.type}. 
However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCAmelCase__ = replace_with_bnb_layers( A , A , modules_to_not_convert=A ) lowerCAmelCase__ = get_quantized_model_device_map( A , A , A , max_memory=A , no_split_module_classes=A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCAmelCase__ = True lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A , device_map=A , offload_dir=A ) def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]: if device_map is None: if torch.cuda.is_available(): lowerCAmelCase__ = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(A , A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) lowerCAmelCase__ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCAmelCase__ = {} lowerCAmelCase__ = special_dtypes lowerCAmelCase__ = no_split_module_classes lowerCAmelCase__ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCAmelCase__ = get_balanced_memory( A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , ) lowerCAmelCase__ = max_memory lowerCAmelCase__ = infer_auto_device_map(A , **A ) if isinstance(A , A ): # check if don't have any quantized module on the cpu lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCAmelCase__ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _snake_case ( A , A , A=None , A=None ) -> Any: if modules_to_not_convert is None: lowerCAmelCase__ = [] lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]: lowerCAmelCase__ = False for name, module in model.named_children(): if current_key_name is None: lowerCAmelCase__ = [] current_key_name.append(A ) if isinstance(A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCAmelCase__ = '''.'''.join(A ) lowerCAmelCase__ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCAmelCase__ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) lowerCAmelCase__ = module.weight.data if module.bias is not None: lowerCAmelCase__ = module.bias.data bnb_module.requires_grad_(A ) setattr(A , A , A ) lowerCAmelCase__ = True if len(list(module.children() ) ) > 0: lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) lowerCAmelCase__ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case ( A ) -> Tuple: # Create a copy of the model with init_empty_weights(): lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCAmelCase__ = find_tied_parameters(A ) # For compatibility with Accelerate < 0.18 if isinstance(A , A ): lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCAmelCase__ = sum(A , [] ) lowerCAmelCase__ = len(A ) > 0 # Check if it is a base model lowerCAmelCase__ = False if hasattr(A , '''base_model_prefix''' ): lowerCAmelCase__ = not hasattr(A , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCAmelCase__ = list(model.named_children() ) lowerCAmelCase__ = [list_modules[-1][0]] # add last module together with tied weights lowerCAmelCase__ = set(A ) - set(A ) lowerCAmelCase__ = list(set(A ) ) + list(A ) # remove ".weight" from the keys lowerCAmelCase__ = ['''.weight''', '''.bias'''] lowerCAmelCase__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCAmelCase__ = name.replace(A , '''''' ) filtered_module_names.append(A ) return filtered_module_names def _snake_case ( A ) -> Optional[int]: for m in model.modules(): if isinstance(A , bnb.nn.Linearabit ): return True return False def _snake_case ( A ) -> Union[str, Any]: return next(parameter.parameters() ).device def _snake_case ( A , A , A , A , A , A , A ) -> Any: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(A , A , 0 , dtype=A , value=A ) lowerCAmelCase__ = param_name lowerCAmelCase__ = model if "." in tensor_name: lowerCAmelCase__ = tensor_name.split('''.''' ) for split in splits[:-1]: lowerCAmelCase__ = getattr(A , A ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCAmelCase__ = new_module lowerCAmelCase__ = splits[-1] # offload weights lowerCAmelCase__ = False offload_weight(module._parameters[tensor_name] , A , A , index=A ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , ) else: offload_weight(A , A , A , index=A ) offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A ) set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
90
1
'''simple docstring'''

UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
90
'''simple docstring'''

from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (explicit Euler), then trapezoidal corrector (Heun's method)
        y_pred = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
90
1
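A minimal usage sketch for the `euler_modified` integrator restored above (the function name comes from that restoration; run this in the same module that defines it). It integrates dy/dx = y with y(0) = 1 over [0, 1], so the final entry should approximate e.

def exponential_growth(x: float, y: float) -> float:
    # right-hand side of dy/dx = y
    return y


y = euler_modified(exponential_growth, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(y[-1])  # ≈ 2.718, close to e = 2.71828...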
'''simple docstring''' import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=30 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=10 , lowerCamelCase_=0.02 , lowerCamelCase_=None , lowerCamelCase_=2 , ) -> Any: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = scope lowerCAmelCase__ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase__ = (image_size // patch_size) ** 2 lowerCAmelCase__ = num_patches + 1 def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]: lowerCAmelCase__ = ViTModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str: lowerCAmelCase__ = ViTForMaskedImageModeling(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() 
lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = ViTForMaskedImageModeling(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple: lowerCAmelCase__ = self.type_sequence_label_size lowerCAmelCase__ = ViTForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = ViTForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) = config_and_inputs lowerCAmelCase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class a__ ( a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Any = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) lowercase__ : Tuple = ( {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) lowercase__ : Any = True lowercase__ : Union[str, Any] = False lowercase__ : Any = False lowercase__ : Tuple = False def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = ViTModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: pass def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(lowerCamelCase_ ) lowerCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] lowerCAmelCase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = ViTModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def _snake_case ( ) -> List[Any]: lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class a__ ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(lowerCamelCase_ ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase__ = model(**lowerCamelCase_ ) # verify the logits lowerCAmelCase__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) lowerCAmelCase__ = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> str: # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
lowerCAmelCase__ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(lowerCamelCase_ ) lowerCAmelCase__ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_80 ) lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase__ = inputs.pixel_values.to(lowerCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase__ = model(lowerCamelCase_ , interpolate_pos_encoding=lowerCamelCase_ ) # verify the logits lowerCAmelCase__ = torch.Size((1, 36_01, 3_84) ) self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ ) lowerCAmelCase__ = torch.tensor( [[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase__ = inputs.pixel_values.to(lowerCamelCase_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowerCAmelCase__ = model(lowerCamelCase_ )
90
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict: lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Any: return self.length def __getitem__( self , lowerCamelCase_ ) -> List[str]: return {"x": self.x[i], "y": self.y[i]} class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _snake_case ( A , A = 16 ) -> Any: from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} lowerCAmelCase__ = load_dataset('''csv''' , data_files=A ) lowerCAmelCase__ = datasets['''train'''].unique('''label''' ) lowerCAmelCase__ = {v: i for i, v in enumerate(A )} def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowerCAmelCase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 ) return train_dataloader, eval_dataloader
90
1
'''simple docstring'''

UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    # normalize e.g. "Meters" -> "meter" -> "m"
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
90
'''simple docstring'''

import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
90
1
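Illustrative calls for the `length_conversion` helper restored above (the function name comes from that restoration; run this in the same module that defines it). Each conversion simply shifts the value by the difference of the two base-10 exponents.

print(length_conversion(4, "meter", "kilometer"))  # 0.004
print(length_conversion(1, "meter", "megametre"))  # 1e-06
print(length_conversion(3, "gigametre", "meter"))  # 3000000000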
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = jnp.ones((batch_size, length) ) / length return scores def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = None lowerCAmelCase__ = 20 lowerCAmelCase__ = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase_ ) # tweak scores to not be uniform anymore lowerCAmelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCAmelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCAmelCase__ = jax.nn.softmax(lowerCamelCase_ , axis=-1 ) lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCAmelCase__ = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 ) lowerCAmelCase__ = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = None lowerCAmelCase__ = 10 lowerCAmelCase__ = 2 # create ramp distribution lowerCAmelCase__ = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() lowerCAmelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCAmelCase__ = FlaxTopKLogitsWarper(3 ) lowerCAmelCase__ = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCAmelCase__ = 5 lowerCAmelCase__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) lowerCAmelCase__ = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, length) ).copy() lowerCAmelCase__ = top_k_warp_safety_check(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = None lowerCAmelCase__ = 10 lowerCAmelCase__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCAmelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 
0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 ) lowerCAmelCase__ = np.exp(top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCAmelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # check edge cases with negative and extreme logits lowerCAmelCase__ = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCAmelCase__ = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCAmelCase__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) lowerCAmelCase__ = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = 20 lowerCAmelCase__ = 4 lowerCAmelCase__ = 0 lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) # check that min length is applied at length 5 lowerCAmelCase__ = ids_tensor((batch_size, 20) , vocab_size=20 ) lowerCAmelCase__ = 5 lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] ) # check that min length is not applied anymore at length 15 lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = 15 lowerCAmelCase__ = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = 20 lowerCAmelCase__ = 4 lowerCAmelCase__ = 0 lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) # check that all scores are -inf except the bos_token_id score lowerCAmelCase__ = ids_tensor((batch_size, 1) , vocab_size=20 ) lowerCAmelCase__ = 1 lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCAmelCase__ = 3 lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = 20 lowerCAmelCase__ = 4 lowerCAmelCase__ = 0 lowerCAmelCase__ = 5 lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCAmelCase__ = ids_tensor((batch_size, 4) , vocab_size=20 ) lowerCAmelCase__ = 4 lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) 
lowerCAmelCase__ = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCAmelCase__ = 3 lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = 4 lowerCAmelCase__ = 10 lowerCAmelCase__ = 15 lowerCAmelCase__ = 2 lowerCAmelCase__ = 1 lowerCAmelCase__ = 15 # dummy input_ids and scores lowerCAmelCase__ = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ ) lowerCAmelCase__ = input_ids.copy() lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = scores.copy() # instantiate all dist processors lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase__ = FlaxTopKLogitsWarper(3 ) lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) lowerCAmelCase__ = 10 # no processor list lowerCAmelCase__ = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # with processor list lowerCAmelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCAmelCase__ = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = 4 lowerCAmelCase__ = 10 lowerCAmelCase__ = 15 lowerCAmelCase__ = 2 lowerCAmelCase__ = 1 lowerCAmelCase__ = 15 # dummy input_ids and scores lowerCAmelCase__ = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ ) lowerCAmelCase__ = input_ids.copy() lowerCAmelCase__ = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = scores.copy() # instantiate all dist processors lowerCAmelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase__ = FlaxTopKLogitsWarper(3 ) lowerCAmelCase__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCAmelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) lowerCAmelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) lowerCAmelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , 
eos_token_id=lowerCamelCase_ ) lowerCAmelCase__ = 10 # no processor list def run_no_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) lowerCAmelCase__ = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) return scores # with processor list def run_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCAmelCase__ = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) return scores lowerCAmelCase__ = jax.jit(lowerCamelCase_ ) lowerCAmelCase__ = jax.jit(lowerCamelCase_ ) lowerCAmelCase__ = jitted_run_no_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = jitted_run_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
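The tests above exercise each Flax logits processor both in isolation and chained through FlaxLogitsProcessorList. A minimal standalone sketch of the same chaining, assuming a transformers install with the Flax extras; all shapes and values below are illustrative only:

import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

batch_size, vocab_size, cur_len = 2, 10, 5
input_ids = jnp.zeros((batch_size, cur_len), dtype=jnp.int32)  # dummy prompt ids
scores = jnp.ones((batch_size, vocab_size)) / vocab_size       # uniform logits

# Processors compose left to right, exactly as in the processor-list test above.
processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=3)]
)
warped = processors(input_ids, scores, cur_len=cur_len)
print(warped.shape)  # (2, 10); entries outside each row's top-3 are set to -inf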
90
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]: lowerCAmelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), ('''vqa_classifier.0.bias''', 
'''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def _snake_case ( A , A ) -> List[str]: for i in range(config.num_hidden_layers ): lowerCAmelCase__ = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" ) lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase__ = in_proj_bias[: config.hidden_size] lowerCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ = in_proj_bias[-config.hidden_size :] def _snake_case ( A ) -> List[str]: lowerCAmelCase__ = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(A , A ) def _snake_case ( A , A , A ) -> str: lowerCAmelCase__ = dct.pop(A ) lowerCAmelCase__ = val @torch.no_grad() def _snake_case ( A , A ) -> Any: lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False if "vqa" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 3129 lowerCAmelCase__ = '''huggingface/label-files''' lowerCAmelCase__ = '''vqa2-id2label.json''' lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} lowerCAmelCase__ = ViltForQuestionAnswering(A ) elif "nlvr" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 2 lowerCAmelCase__ = {0: '''False''', 1: '''True'''} lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()} lowerCAmelCase__ = 3 lowerCAmelCase__ = ViltForImagesAndTextClassification(A ) elif "irtr" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = ViltForImageAndTextRetrieval(A ) elif "mlm_itm" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = ViltForMaskedLM(A ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict'''] lowerCAmelCase__ = create_rename_keys(A , A , A , A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_q_k_v(A , A ) if mlm_model or irtr_model: lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] for k in ignore_keys: state_dict.pop(A , A ) # load state dict into HuggingFace model model.eval() if mlm_model: lowerCAmelCase__ , lowerCAmelCase__ = 
model.load_state_dict(A , strict=A ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(A ) # Define processor lowerCAmelCase__ = ViltImageProcessor(size=384 ) lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' ) lowerCAmelCase__ = ViltProcessor(A , A ) # Forward pass on example inputs (image + text) if nlvr_model: lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw ) lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw ) lowerCAmelCase__ = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw ) if mlm_model: lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].''' else: lowerCAmelCase__ = '''How many cats are there?''' lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = model(**A ) # Verify outputs if mlm_model: lowerCAmelCase__ = torch.Size([1, 11, 30522] ) lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 ) # verify masked token prediction equals "cats" lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: lowerCAmelCase__ = torch.Size([1, 3129] ) lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 ) # verify vqa prediction equals "2" lowerCAmelCase__ = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: lowerCAmelCase__ = torch.Size([1, 2] ) lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(A ).mkdir(exist_ok=A ) print(F"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) processor.save_pretrained(A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __UpperCAmelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
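For reference, a hedged sketch of how a converted VQA checkpoint is used end to end; the hub id "dandelin/vilt-b32-finetuned-vqa" is an assumption on my part, not something the script above pins down, while the test image and the expected answer "2" come from the script's own verification step:

import requests
import torch
from PIL import Image
from transformers import ViltForQuestionAnswering, ViltProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # same test image as above
image = Image.open(requests.get(url, stream=True).raw)
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")  # assumed hub id
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

inputs = processor(image, "How many cats are there?", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # expected: "2"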
90
1
'''simple docstring'''
import argparse
import json
import os

import torch

from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
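The heart of this script is the key-migration pattern: map legacy config names onto current ones while leaving unknown keys alone. A self-contained sketch of that pattern with illustrative values:

legacy = {"image_size": 64, "num_res_blocks": 2, "resnet_eps": 1e-5}
renames = {  # subset of config_parameters_to_change above
    "image_size": "sample_size",
    "num_res_blocks": "layers_per_block",
    "resnet_eps": "norm_eps",
}
migrated = {renames.get(key, key): value for key, value in legacy.items()}
print(migrated)  # {'sample_size': 64, 'layers_per_block': 2, 'norm_eps': 1e-05}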
90
'''simple docstring'''
import re


def indian_phone_validator(phone: str) -> bool:
    # Accepts an optional +91 prefix (followed by - or a space), an optional
    # leading 0 or 91, then a 10-digit number starting with 7, 8 or 9.
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
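A quick behavioural check of the same pattern; using re.fullmatch makes the explicit ^...$ anchors and the match.string comparison unnecessary, which is the main design alternative here. All numbers below are made up:

import re

def is_indian_phone(number: str) -> bool:
    # identical pattern to the validator above, minus the anchors
    return re.fullmatch(r"(\+91[\-\s]?)?[0]?(91)?[789]\d{9}", number) is not None

for number in ("+918827897895", "+91 8827897895", "9876543210", "1234567890"):
    print(number, is_indian_phone(number))  # True, True, True, False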
90
1
'''simple docstring''' import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def _snake_case ( A , A=False ) -> Optional[Any]: try: lowerCAmelCase__ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowerCAmelCase__ = default else: # KEY is set, convert it to True or False. try: lowerCAmelCase__ = strtobool(A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value __UpperCAmelCase = parse_flag_from_env('''RUN_SLOW''', default=False) def _snake_case ( A ) -> List[Any]: return unittest.skip('''Test was skipped''' )(A ) def _snake_case ( A ) -> Tuple: return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(A ) def _snake_case ( A ) -> Any: return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(A ) def _snake_case ( A ) -> str: return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(A ) def _snake_case ( A ) -> str: return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(A ) def _snake_case ( A ) -> Dict: return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(A ) def _snake_case ( A ) -> Tuple: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(A ) def _snake_case ( A ) -> Dict: return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(A ) def _snake_case ( A ) -> Optional[Any]: return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(A ) def _snake_case ( A ) -> Optional[int]: return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(A ) def _snake_case ( A ) -> List[str]: return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(A ) def _snake_case ( A ) -> List[Any]: return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(A ) def _snake_case ( A ) -> str: return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(A ) def _snake_case ( A ) -> Any: return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(A ) def _snake_case ( A ) -> Optional[int]: return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(A ) def _snake_case ( A ) -> Union[str, Any]: return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(A ) def _snake_case ( A=None , A=None ) -> Union[str, Any]: if test_case is None: return partial(A , version=A ) return unittest.skipUnless(is_torch_version('''>=''' , A ) , F"""test requires torch version >= {version}""" )(A ) def _snake_case ( A ) -> List[str]: return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(A ) def _snake_case ( A ) -> Optional[int]: return 
unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(A ) def _snake_case ( A ) -> Optional[int]: return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(A ) __UpperCAmelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def _snake_case ( A ) -> Optional[int]: return unittest.skipUnless( _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(A ) class a__ ( unittest.TestCase ): '''simple docstring''' lowercase__ : List[Any] = True @classmethod def __SCREAMING_SNAKE_CASE ( cls ) -> str: lowerCAmelCase__ = tempfile.mkdtemp() @classmethod def __SCREAMING_SNAKE_CASE ( cls ) -> str: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __SCREAMING_SNAKE_CASE ( self ) -> str: if self.clear_on_setup: for path in Path(self.tmpdir ).glob('''**/*''' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowerCamelCase_ ) class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[Any]: lowerCAmelCase__ = mocks if isinstance(lowerCamelCase_ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def _snake_case ( A ) -> int: lowerCAmelCase__ = AcceleratorState() lowerCAmelCase__ = tensor[None].clone().to(state.device ) lowerCAmelCase__ = gather(A ).cpu() lowerCAmelCase__ = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , A ): return False return True class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple: lowerCAmelCase__ = returncode lowerCAmelCase__ = stdout lowerCAmelCase__ = stderr async def _snake_case ( A , A ) -> Union[str, Any]: while True: lowerCAmelCase__ = await stream.readline() if line: callback(A ) else: break async def _snake_case ( A , A=None , A=None , A=None , A=False , A=False ) -> _RunOutput: if echo: print('''\nRunning: ''' , ''' '''.join(A ) ) lowerCAmelCase__ = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowerCAmelCase__ = [] lowerCAmelCase__ = [] def tee(A , A , A , A="" ): lowerCAmelCase__ = line.decode('''utf-8''' ).rstrip() sink.append(A ) if not quiet: print(A , A , file=A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda A : tee(A , A , sys.stdout , label='''stdout:''' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda A : tee(A , A , sys.stderr , label='''stderr:''' ) ) ), ] , timeout=A , ) return _RunOutput(await p.wait() , A , A ) def _snake_case ( A , A=None , A=None , A=180 , A=False , A=True ) -> _RunOutput: lowerCAmelCase__ = asyncio.get_event_loop() lowerCAmelCase__ = loop.run_until_complete( _stream_subprocess(A , env=A , stdin=A , timeout=A , quiet=A , echo=A ) ) lowerCAmelCase__ = ''' '''.join(A ) if result.returncode > 0: lowerCAmelCase__ = '''\n'''.join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) return result class a__ ( a__ ): '''simple docstring''' pass def _snake_case ( A , A=False ) -> Optional[int]: try: lowerCAmelCase__ = subprocess.check_output(A , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(A , '''decode''' ): lowerCAmelCase__ = output.decode('''utf-8''' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"""Command `{" ".join(A )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
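The RUN_SLOW gating at the top of this module is the piece most callers interact with. A self-contained sketch of how it works; parse_flag_from_env mirrors the helper above, and the test class is illustrative:

import os
import unittest
from distutils.util import strtobool

def parse_flag_from_env(key, default=False):
    value = os.environ.get(key)
    return default if value is None else bool(strtobool(value))

RUN_SLOW = parse_flag_from_env("RUN_SLOW", default=False)

@unittest.skipUnless(RUN_SLOW, "test is slow")
class ExpensiveTests(unittest.TestCase):
    def test_big_model(self):  # runs only when RUN_SLOW=yes is exported
        self.assertTrue(True)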
90
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''', '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''', }, } __UpperCAmelCase = { '''facebook/esm2_t6_8M_UR50D''': 1_024, '''facebook/esm2_t12_35M_UR50D''': 1_024, } def _snake_case ( A ) -> Optional[Any]: with open(A , '''r''' ) as f: lowerCAmelCase__ = f.read().splitlines() return [l.strip() for l in lines] class a__ ( a__ ): '''simple docstring''' lowercase__ : Optional[Any] = VOCAB_FILES_NAMES lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple: super().__init__(**lowerCamelCase_ ) lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ ) lowerCAmelCase__ = dict(enumerate(self.all_tokens ) ) lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )} lowerCAmelCase__ = unk_token lowerCAmelCase__ = cls_token lowerCAmelCase__ = pad_token lowerCAmelCase__ = mask_token lowerCAmelCase__ = eos_token lowerCAmelCase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]: return text.split() def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict: return len(self._id_to_token ) def __SCREAMING_SNAKE_CASE ( self ) -> int: return {token: i for i, token in enumerate(self.all_tokens )} def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]: lowerCAmelCase__ = [self.cls_token_id] lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 
for token in token_ids_a] lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1] if token_ids_a is not None: mask += [0] * len(lowerCamelCase_ ) + [1] return mask def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]: lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(lowerCamelCase_ , '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return self.get_vocab_size(with_added_tokens=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int: return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ )
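A short usage sketch for the tokenizer above; the hub id comes from the module's own pretrained map, and the shape comment assumes the usual CLS/EOS wrapping (one vocabulary token per amino-acid residue):

from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
encoded = tokenizer("MKTAYIAKQR", return_tensors="pt")  # one token per residue
print(encoded["input_ids"].shape)  # (1, 12): 10 residues plus <cls> and <eos>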
90
1
'''simple docstring'''
import re


def indian_phone_validator(phone: str) -> bool:
    # Accepts an optional +91 prefix (followed by - or a space), an optional
    # leading 0 or 91, then a 10-digit number starting with 7, 8 or 9.
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
90
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a__ ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = AltDiffusionPipeline lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS def __SCREAMING_SNAKE_CASE ( self ) -> str: torch.manual_seed(0 ) lowerCAmelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowerCAmelCase__ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) lowerCAmelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , ) lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ ) lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCAmelCase__ = 77 lowerCAmelCase__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]: if str(lowerCamelCase_ ).startswith('''mps''' ): lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ ) else: lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) lowerCAmelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: 
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = '''A photo of an astronaut''' lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # make sure here that pndm scheduler skips prk lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 
5_12, 3) lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
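For completeness, a hedged inference sketch matching the slow tests; it assumes a diffusers version that still ships AltDiffusionPipeline and enough memory to load the "BAAI/AltDiffusion" checkpoint referenced in the tests:

import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    num_inference_steps=20,
).images[0]
image.save("squirrel.png")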
90
1
'''simple docstring'''
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()

if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
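dep_version_check is the hook other modules call for optional dependencies; underneath, everything funnels into require_version. A minimal sketch of that helper; the version specifier below is illustrative, not taken from the dependency table:

from transformers.utils.versions import require_version

# Raises with the hint appended if the installed tqdm violates the specifier.
require_version("tqdm>=4.27", "Try: pip install -U tqdm")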
90
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    # Smallest positive number evenly divisible by all of 1..n (Project Euler 5).
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
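The same answer falls out of the standard library, which makes a handy cross-check (on Python 3.9+ math.lcm collapses this to math.lcm(*range(1, 21))):

from functools import reduce
from math import gcd

def lcm(a: int, b: int) -> int:
    return a * b // gcd(a, b)

print(reduce(lcm, range(1, 21), 1))  # 232792560, matching solution()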
90
1
'''simple docstring''' import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class a__ ( a__ ): '''simple docstring''' def __init__( self , *lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ) -> List[Any]: super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) lowerCAmelCase__ = eval_examples lowerCAmelCase__ = post_process_function def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None , lowerCamelCase_=None , lowerCamelCase_ = None , lowerCamelCase_ = "eval" , **lowerCamelCase_ , ) -> Dict[str, float]: lowerCAmelCase__ = gen_kwargs.copy() lowerCAmelCase__ = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length ) lowerCAmelCase__ = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams ) lowerCAmelCase__ = gen_kwargs lowerCAmelCase__ = self.eval_dataset if eval_dataset is None else eval_dataset lowerCAmelCase__ = self.get_eval_dataloader(lowerCamelCase_ ) lowerCAmelCase__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase__ = self.compute_metrics lowerCAmelCase__ = None lowerCAmelCase__ = time.time() lowerCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowerCAmelCase__ = eval_loop( lowerCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , ) finally: lowerCAmelCase__ = compute_metrics lowerCAmelCase__ = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCAmelCase__ = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = self.compute_metrics(lowerCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): lowerCAmelCase__ = metrics.pop(lowerCamelCase_ ) metrics.update(output.metrics ) else: lowerCAmelCase__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowerCamelCase_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) lowerCAmelCase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase_ ) return metrics def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_ = "test" , **lowerCamelCase_ ) -> int: lowerCAmelCase__ = gen_kwargs.copy() lowerCAmelCase__ = self.get_test_dataloader(lowerCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase__ = self.compute_metrics lowerCAmelCase__ = None lowerCAmelCase__ = time.time() lowerCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowerCAmelCase__ = eval_loop( lowerCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , ) finally: lowerCAmelCase__ = compute_metrics lowerCAmelCase__ = self.args.eval_batch_size * self.args.world_size if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCAmelCase__ = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , '''predict''' ) lowerCAmelCase__ = self.compute_metrics(lowerCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): lowerCAmelCase__ = metrics.pop(lowerCamelCase_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase_ )
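Both overrides above lean on speed_metrics to turn wall-clock evaluation time into throughput numbers. A self-contained sketch of that helper in isolation; the sleep stands in for a real evaluation loop:

import time
from transformers.trainer_utils import speed_metrics

start = time.time()
time.sleep(0.1)  # stand-in for an evaluation or prediction loop
print(speed_metrics("eval", start, num_samples=8, num_steps=2))
# e.g. {'eval_runtime': 0.1, 'eval_samples_per_second': 80.0, 'eval_steps_per_second': 20.0}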
90
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
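Each converter maps a menu index onto a typed enum. A standalone copy of the mixed-precision converter above, checked outside the interactive flow; the import mirrors the module's own relative import of accelerate's dataclasses:

from accelerate.utils.dataclasses import PrecisionType

def convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])

print(convert_mixed_precision("1"))  # PrecisionType.FP16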
90
1
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _snake_case ( A , A=10 ) -> Any: lowerCAmelCase__ = [] for _ in range(A ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _snake_case ( A , A=10 ) -> Optional[Any]: lowerCAmelCase__ = [] for step in range(A ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = os.path.join(A , '''schedule.bin''' ) torch.save(scheduler.state_dict() , A ) lowerCAmelCase__ = torch.load(A ) scheduler.load_state_dict(A ) return lrs @require_torch class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any: self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertAlmostEqual(lowerCamelCase_ , lowerCamelCase_ , delta=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase_ ) lowerCAmelCase__ = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase__ = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_00 ): lowerCAmelCase__ = criterion(lowerCamelCase_ , lowerCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase_ ) lowerCAmelCase__ = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase__ = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCamelCase_ , weight_decay=0.0 , relative_step=lowerCamelCase_ , scale_parameter=lowerCamelCase_ , warmup_init=lowerCamelCase_ , ) for _ in range(10_00 ): lowerCAmelCase__ = criterion(lowerCamelCase_ , lowerCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class a__ ( unittest.TestCase ): '''simple docstring''' lowercase__ : List[str] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None lowercase__ : Tuple = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None lowercase__ : int = 1_0 def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None ) -> Optional[Any]: self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertAlmostEqual(lowerCamelCase_ , lowerCamelCase_ , delta=lowerCamelCase_ , msg=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowerCAmelCase__ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): lowerCAmelCase__ , lowerCAmelCase__ = data lowerCAmelCase__ = scheduler_func(self.optimizer , **lowerCamelCase_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCAmelCase__ = unwrap_schedule(lowerCamelCase_ , self.num_steps ) self.assertListAlmostEqual( lowerCamelCase_ , lowerCamelCase_ , tol=1e-2 , msg=F"""failed for {scheduler_func} in normal scheduler""" , ) lowerCAmelCase__ = scheduler_func(self.optimizer , **lowerCamelCase_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(lowerCamelCase_ ) # wrap to test picklability of the schedule lowerCAmelCase__ = unwrap_and_save_reload_schedule(lowerCamelCase_ , self.num_steps ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ , msg=F"""failed for {scheduler_func} in save and reload""" ) class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_ ) -> Optional[Any]: lowerCAmelCase__ = fn def __call__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[int]: return self.fn(*lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = list(map(self , scheduler.lr_lambdas ) )
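The expected learning-rate lists in the schedule test can be reproduced directly. A self-contained trace of the linear-warmup schedule, using transformers' AdamW to mirror the tests (newer transformers versions emit a deprecation warning for it):

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = [scheduler.get_last_lr()[0]]
for _ in range(9):
    optimizer.step()  # no gradients, so parameters are untouched; only the schedule advances
    scheduler.step()
    lrs.append(scheduler.get_last_lr()[0])
print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]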
90
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a__ ( a__ ): '''simple docstring''' lowercase__ : torch.FloatTensor class a__ ( a__ , a__ ): '''simple docstring''' @register_to_config def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]: super().__init__() # pass init params to Encoder lowerCAmelCase__ = Encoder( in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , ) lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 ) lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ ) lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 ) # pass init params to Decoder lowerCAmelCase__ = Decoder( in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , ) @apply_forward_hook def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput: lowerCAmelCase__ = self.encoder(lowerCamelCase_ ) lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCamelCase_ ) @apply_forward_hook def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ ) else: lowerCAmelCase__ = h lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ ) lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: lowerCAmelCase__ = sample lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase_ )
90
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
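# Migration sketch for the deprecation above: the replacement class is a drop-in.
# Assumes network access; "nvidia/segformer-b0-finetuned-ade-512-512" is one
# public checkpoint and stands in for any SegFormer checkpoint.
from transformers import SegformerImageProcessor

# before: SegformerFeatureExtractor.from_pretrained(...)  (now emits a FutureWarning)
image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")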
90
'''simple docstring'''
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether ``n`` can be placed at ``grid[row][column]``."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the next empty cell, scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the guess and backtrack

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
90
1
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def _snake_case ( A , A ) -> str | Literal[False]: lowerCAmelCase__ = list(A ) lowerCAmelCase__ = list(A ) lowerCAmelCase__ = 0 for i in range(len(A ) ): if lista[i] != lista[i]: count += 1 lowerCAmelCase__ = '''_''' if count > 1: return False else: return "".join(A ) def _snake_case ( A ) -> list[str]: lowerCAmelCase__ = [] while True: lowerCAmelCase__ = ['''$'''] * len(A ) lowerCAmelCase__ = [] for i in range(len(A ) ): for j in range(i + 1 , len(A ) ): lowerCAmelCase__ = compare_string(binary[i] , binary[j] ) if k is False: lowerCAmelCase__ = '''*''' lowerCAmelCase__ = '''*''' temp.append('''X''' ) for i in range(len(A ) ): if checka[i] == "$": pi.append(binary[i] ) if len(A ) == 0: return pi lowerCAmelCase__ = list(set(A ) ) def _snake_case ( A , A ) -> list[str]: lowerCAmelCase__ = [] for minterm in minterms: lowerCAmelCase__ = '''''' for _ in range(A ): lowerCAmelCase__ = str(minterm % 2 ) + string minterm //= 2 temp.append(A ) return temp def _snake_case ( A , A , A ) -> bool: lowerCAmelCase__ = list(A ) lowerCAmelCase__ = list(A ) lowerCAmelCase__ = 0 for i in range(len(A ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def _snake_case ( A , A ) -> list[str]: lowerCAmelCase__ = [] lowerCAmelCase__ = [0] * len(A ) for i in range(len(chart[0] ) ): lowerCAmelCase__ = 0 lowerCAmelCase__ = -1 for j in range(len(A ) ): if chart[j][i] == 1: count += 1 lowerCAmelCase__ = j if count == 1: lowerCAmelCase__ = 1 for i in range(len(A ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(A ) ): lowerCAmelCase__ = 0 temp.append(prime_implicants[i] ) while True: lowerCAmelCase__ = 0 lowerCAmelCase__ = -1 lowerCAmelCase__ = 0 for i in range(len(A ) ): lowerCAmelCase__ = chart[i].count(1 ) if count_n > max_n: lowerCAmelCase__ = count_n lowerCAmelCase__ = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(A ) ): lowerCAmelCase__ = 0 def _snake_case ( A , A ) -> list[list[int]]: lowerCAmelCase__ = [[0 for x in range(len(A ) )] for x in range(len(A ) )] for i in range(len(A ) ): lowerCAmelCase__ = prime_implicants[i].count('''_''' ) for j in range(len(A ) ): if is_for_table(prime_implicants[i] , binary[j] , A ): lowerCAmelCase__ = 1 return chart def _snake_case ( ) -> None: lowerCAmelCase__ = int(input('''Enter the no. of variables\n''' ) ) lowerCAmelCase__ = [ float(A ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] lowerCAmelCase__ = decimal_to_binary(A , A ) lowerCAmelCase__ = check(A ) print('''Prime Implicants are:''' ) print(A ) lowerCAmelCase__ = prime_implicant_chart(A , A ) lowerCAmelCase__ = selection(A , A ) print('''Essential Prime Implicants are:''' ) print(A ) if __name__ == "__main__": import doctest doctest.testmod() main()
90
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Index of the first Fibonacci number with at least ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
90
1
'''simple docstring'''
class UnionFind:
    """Disjoint-set (union by rank, path compression) that tracks the size of the largest set."""

    def __init__(self, set_counts: list[int]) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing ``src`` and ``dst``; return False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of ``disj_set``, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
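# Usage sketch (illustrative values, not from the original file): three sets of
# sizes 1, 2 and 3; merging the first and third yields a set of size 4.
sets = UnionFind([1, 2, 3])
assert sets.merge(0, 2)                          # join the size-1 and size-3 sets
assert not sets.merge(0, 2)                      # already in the same set
assert sets.max_set == 4                         # largest set grew from 3 to 4
assert sets.get_parent(0) == sets.get_parent(2)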
90
'''simple docstring'''
from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of ``lst`` (1-indexed, distinct elements)."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
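# Usage sketch: the file imports doctest but defines no doctests, so here is a
# plain example (illustrative values; elements must be distinct, since values
# equal to the pivot are dropped by the partition above).
assert kth_number([7, 2, 9, 4, 5], 1) == 2
assert kth_number([7, 2, 9, 4, 5], 3) == 5
assert kth_number([7, 2, 9, 4, 5], 5) == 9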
90
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class a__ ( unittest.TestCase ): '''simple docstring''' lowercase__ : List[Any] = ViTImageProcessor if is_vision_available() else None @property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = (3, 32, 1_28) lowerCAmelCase__ = tempfile.mkdtemp() # fmt: off lowerCAmelCase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on lowerCAmelCase__ = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' ) lowerCAmelCase__ = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 32, '''width''': 1_28}, } lowerCAmelCase__ = os.path.join(self.tmpdirname , lowerCamelCase_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> Optional[int]: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> Tuple: return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: shutil.rmtree(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta ) lowerCAmelCase__ = Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) return image_input def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , lowerCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) 
processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCAmelCase__ = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 ) lowerCAmelCase__ = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , lowerCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = image_processor(lowerCamelCase_ , return_tensors='''np''' ) lowerCAmelCase__ = processor(images=lowerCamelCase_ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) lowerCAmelCase__ = '''test''' lowerCAmelCase__ = processor(text=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer(lowerCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) lowerCAmelCase__ = '''test''' lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = processor(text=lowerCamelCase_ , images=lowerCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase_ ): processor() def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase__ = processor.char_decode(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.batch_decode(lowerCamelCase_ ) lowerCAmelCase__ = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) lowerCAmelCase__ = None lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = processor(text=lowerCamelCase_ , images=lowerCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = MgpstrProcessor(tokenizer=lowerCamelCase_ , 
image_processor=lowerCamelCase_ ) lowerCAmelCase__ = torch.randn(1 , 27 , 38 ) lowerCAmelCase__ = torch.randn(1 , 27 , 5_02_57 ) lowerCAmelCase__ = torch.randn(1 , 27 , 3_05_22 ) lowerCAmelCase__ = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
90
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
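# Usage sketch for the public API exported above. Assumes torch, Pillow and
# network access; "CIDAS/clipseg-rd64-refined" is the reference public checkpoint.
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.new("RGB", (352, 352))  # placeholder image
inputs = processor(text=["a cat", "a dog"], images=[image, image], return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape)  # one low-resolution mask per (image, prompt) pair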
90
1
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : str = LongformerTokenizer lowercase__ : Optional[Any] = True lowercase__ : Optional[int] = LongformerTokenizerFast lowercase__ : Tuple = True def __SCREAMING_SNAKE_CASE ( self ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase__ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase__ = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) lowerCAmelCase__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase__ = {'''unk_token''': '''<unk>'''} lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCamelCase_ ) ) def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , **lowerCamelCase_ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: lowerCAmelCase__ = '''lower newer''' lowerCAmelCase__ = '''lower newer''' return input_text, output_text def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase__ = '''lower newer''' lowerCAmelCase__ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowerCAmelCase__ = tokenizer.tokenize(lowerCamelCase_ ) # , add_prefix_space=True) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = tokens + [tokenizer.unk_token] lowerCAmelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowerCamelCase_ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=lowerCamelCase_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) lowerCAmelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.encode( '''sequence builders''' , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = '''Encode this sequence.''' lowerCAmelCase__ = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ ) # Testing spaces after special tokens lowerCAmelCase__ = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )} ) # mask token has a left space lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) lowerCAmelCase__ = '''Encode <mask> sequence''' lowerCAmelCase__ = '''Encode <mask>sequence''' lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ ) lowerCAmelCase__ = encoded.index(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.encode(lowerCamelCase_ ) lowerCAmelCase__ = encoded.index(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: pass def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) lowerCAmelCase__ = '''A, <mask> AllenNLP sentence.''' lowerCAmelCase__ = tokenizer_r.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , 
return_token_type_ids=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_p.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowerCAmelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowerCamelCase_ ) self.assertEqual(post_processor_state['''add_prefix_space'''] , lowerCamelCase_ ) self.assertEqual(post_processor_state['''trim_offsets'''] , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase__ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` lowerCAmelCase__ = F"""{text_of_1_token} {text_of_1_token}""" lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , ) 
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase_ ), len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase_ ), len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , ) lowerCAmelCase__ = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ) + 1, 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ), 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ), 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
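# Quick illustration of the add_prefix_space / trim_offsets interplay the tests
# above exercise. Assumes network access to download the tokenizer; any
# RoBERTa-style fast tokenizer behaves the same way.
from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained(
    "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # with trim_offsets=True, leading spaces are excluded from each span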
90
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __UpperCAmelCase = TypeVar('''KEY''') __UpperCAmelCase = TypeVar('''VAL''') @dataclass(frozen=a__ , slots=a__ ) class a__ ( Generic[KEY, VAL] ): '''simple docstring''' lowercase__ : KEY lowercase__ : VAL class a__ ( _Item ): '''simple docstring''' def __init__( self ) -> None: super().__init__(lowerCamelCase_ , lowerCamelCase_ ) def __bool__( self ) -> bool: return False __UpperCAmelCase = _DeletedItem() class a__ ( MutableMapping[KEY, VAL] ): '''simple docstring''' def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None: lowerCAmelCase__ = initial_block_size lowerCAmelCase__ = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ = capacity_factor lowerCAmelCase__ = 0 def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return hash(lowerCamelCase_ ) % len(self._buckets ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return (ind + 1) % len(self._buckets ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool: lowerCAmelCase__ = self._buckets[ind] if not stored: lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ ) return True else: return False def __SCREAMING_SNAKE_CASE ( self ) -> bool: lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None: lowerCAmelCase__ = self._buckets lowerCAmelCase__ = [None] * new_size lowerCAmelCase__ = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __SCREAMING_SNAKE_CASE ( self ) -> None: self._resize(len(self._buckets ) * 2 ) def __SCREAMING_SNAKE_CASE ( self ) -> None: self._resize(len(self._buckets ) // 2 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]: lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None: for ind in self._iterate_buckets(lowerCamelCase_ ): if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): break def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None: if self._is_full(): self._size_up() self._add_item(lowerCamelCase_ , lowerCamelCase_ ) def __delitem__( self , lowerCamelCase_ ) -> None: for ind in self._iterate_buckets(lowerCamelCase_ ): lowerCAmelCase__ = self._buckets[ind] if item is None: raise KeyError(lowerCamelCase_ ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , lowerCamelCase_ ) -> VAL: for ind in self._iterate_buckets(lowerCamelCase_ ): lowerCAmelCase__ = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCamelCase_ ) def __len__( self ) -> int: return self._len def __iter__( self ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: 
lowerCAmelCase__ = ''' ,'''.join( F"""{item.key}: {item.val}""" for item in self._buckets if item ) return F"""HashMap({val_string})"""
90
1
'''simple docstring''' import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _snake_case ( A , A ) -> Union[str, Any]: assert isinstance(A , A ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _snake_case ( A , A , A ) -> Any: lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ = TextDatasetReader(A , cache_dir=A , keep_in_memory=A ).read() _check_text_dataset(A , A ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def _snake_case ( A , A , A ) -> int: lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''text''': '''string'''} lowerCAmelCase__ = features.copy() if features else default_expected_features lowerCAmelCase__ = ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ = TextDatasetReader(A , features=A , cache_dir=A ).read() _check_text_dataset(A , A ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _snake_case ( A , A , A ) -> List[str]: lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''text''': '''string'''} lowerCAmelCase__ = TextDatasetReader(A , cache_dir=A , split=A ).read() _check_text_dataset(A , A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def _snake_case ( A , A , A ) -> str: if issubclass(A , A ): lowerCAmelCase__ = text_path elif issubclass(A , A ): lowerCAmelCase__ = [text_path] lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''text''': '''string'''} lowerCAmelCase__ = TextDatasetReader(A , cache_dir=A ).read() _check_text_dataset(A , A ) def _snake_case ( A , A , A=("train",) ) -> Any: assert isinstance(A , A ) for split in splits: lowerCAmelCase__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _snake_case ( A , A , A ) -> Tuple: lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ = TextDatasetReader({'''train''': text_path} , cache_dir=A , keep_in_memory=A ).read() _check_text_datasetdict(A , A ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def _snake_case ( A , A , A ) -> Union[str, Any]: lowerCAmelCase__ = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowerCAmelCase__ = {'''text''': '''string'''} lowerCAmelCase__ = features.copy() if features else default_expected_features lowerCAmelCase__ = ( Features({feature: Value(A ) for 
feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ = TextDatasetReader({'''train''': text_path} , features=A , cache_dir=A ).read() _check_text_datasetdict(A , A ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _snake_case ( A , A , A ) -> Any: if split: lowerCAmelCase__ = {split: text_path} else: lowerCAmelCase__ = '''train''' lowerCAmelCase__ = {'''train''': text_path, '''test''': text_path} lowerCAmelCase__ = tmp_path / '''cache''' lowerCAmelCase__ = {'''text''': '''string'''} lowerCAmelCase__ = TextDatasetReader(A , cache_dir=A ).read() _check_text_datasetdict(A , A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
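# The same reader is reachable through the high-level entry point; a short
# sketch ("my_file.txt" is a placeholder path, one dataset row per line):
from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "my_file.txt"})
print(dataset["train"].column_names)  # ["text"]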
90
'''simple docstring'''
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
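# Example invocation of the script above (sketch: the script filename and all
# paths below are placeholders):
#
#   python convert_ldm_original.py \
#       --checkpoint_path path/to/model.ckpt \
#       --config_path path/to/config.yaml \
#       --output_path path/to/output_dir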
90
1
'''simple docstring''' import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def _snake_case ( A , A , A , A , A = None , A = None , A = None , ) -> Optional[Any]: if config_name_or_path is None: lowerCAmelCase__ = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base''' if generator_tokenizer_name_or_path is None: lowerCAmelCase__ = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowerCAmelCase__ = question_encoder_name_or_path lowerCAmelCase__ = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration # Save model. lowerCAmelCase__ = RagConfig.from_pretrained(A ) lowerCAmelCase__ = AutoConfig.from_pretrained(A ) lowerCAmelCase__ = AutoConfig.from_pretrained(A ) lowerCAmelCase__ = gen_config lowerCAmelCase__ = question_encoder_config lowerCAmelCase__ = model_class.from_pretrained_question_encoder_generator( A , A , config=A ) rag_model.save_pretrained(A ) # Sanity check. model_class.from_pretrained(A ) # Save tokenizers. lowerCAmelCase__ = AutoTokenizer.from_pretrained(A ) gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' ) lowerCAmelCase__ = AutoTokenizer.from_pretrained(A ) question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--model_type''', choices=['''rag_sequence''', '''rag_token'''], required=True, type=str, help='''RAG model type: rag_sequence, rag_token''', ) parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''') parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''') parser.add_argument( '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier''' ) parser.add_argument( '''--generator_tokenizer_name_or_path''', type=str, help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''', ) parser.add_argument( '''--question_encoder_tokenizer_name_or_path''', type=str, help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''', ) parser.add_argument( '''--config_name_or_path''', type=str, help=( '''Identifier of the model config to use, if not provided, resolves to a base config for a given''' ''' ``model_type``''' ), ) __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
90
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize, converting any nested GenerationConfig to a plain dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
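# Usage sketch (illustrative values; "out" is a placeholder output directory).
# The generation_* fields above override the model configuration at evaluation
# time when predict_with_generate=True.
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)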
90
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __UpperCAmelCase = 250_004 __UpperCAmelCase = 250_020 @require_sentencepiece @require_tokenizers class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : List[str] = MBartaaTokenizer lowercase__ : Union[str, Any] = MBartaaTokenizerFast lowercase__ : Tuple = True lowercase__ : Optional[Any] = True def __SCREAMING_SNAKE_CASE ( self ) -> Any: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = '''<s>''' lowerCAmelCase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(lowerCamelCase_ ) , 10_54 ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + 
'''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> int: # fmt: off lowerCAmelCase__ = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def __SCREAMING_SNAKE_CASE ( self ) -> str: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase__ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = 
tokenizer_r.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCAmelCase__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=True lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=False lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase__ = tokenizer_r.from_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): '''simple docstring''' lowercase__ : Union[str, Any] = "facebook/mbart-large-50-one-to-many-mmt" lowercase__ : List[Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase__ : int = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase__ : Tuple = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def __SCREAMING_SNAKE_CASE ( cls ) -> int: 
lowerCAmelCase__ = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) lowerCAmelCase__ = 1 return cls def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids ) lowerCAmelCase__ = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] lowerCAmelCase__ = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , lowerCamelCase_ ) lowerCAmelCase__ = 10 lowerCAmelCase__ = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0] self.assertEqual(ids[0] , lowerCamelCase_ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = MBartaaTokenizer.from_pretrained(lowerCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ ) @require_torch def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) lowerCAmelCase__ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) lowerCAmelCase__ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset 
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' ) lowerCAmelCase__ = self.tokenizer( text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' ) lowerCAmelCase__ = targets['''input_ids'''] lowerCAmelCase__ = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCamelCase_ ) , { # en_XX, A, test, EOS '''input_ids''': [[25_00_04, 62, 30_34, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 25_00_01, } , )
90
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device __UpperCAmelCase = False class a__ ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = generator.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''cyberpunk 2077''' lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 lowerCAmelCase__ = '''A painting of a squirrel eating a burger ''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.text_to_image( prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
90
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''} __UpperCAmelCase = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } __UpperCAmelCase = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } __UpperCAmelCase = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class a__ ( a__ ): '''simple docstring''' lowercase__ : List[Any] = VOCAB_FILES_NAMES lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Optional[int] = PRETRAINED_INIT_CONFIGURATION lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : Union[str, Any] = ConvBertTokenizer def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_="[UNK]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[PAD]" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Dict: super().__init__( lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , ) lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase_ ) != tokenize_chinese_chars ): lowerCAmelCase__ = getattr(lowerCamelCase_ , normalizer_state.pop('''type''' ) ) lowerCAmelCase__ = do_lower_case lowerCAmelCase__ = strip_accents lowerCAmelCase__ = tokenize_chinese_chars lowerCAmelCase__ = normalizer_class(**lowerCamelCase_ ) lowerCAmelCase__ = do_lower_case def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None ) -> Optional[Any]: lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]: lowerCAmelCase__ = [self.sep_token_id] lowerCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]: lowerCAmelCase__ = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ ) return tuple(lowerCamelCase_ )
90
'''simple docstring''' from __future__ import annotations def _snake_case ( A ) -> bool: lowerCAmelCase__ = str(A ) return len(A ) == 9 and set(A ) == set('''123456789''' ) def _snake_case ( ) -> int | None: for base_num in range(9999 , 4999 , -1 ): lowerCAmelCase__ = 100002 * base_num if is_9_pandigital(A ): return candidate for base_num in range(333 , 99 , -1 ): lowerCAmelCase__ = 1002003 * base_num if is_9_pandigital(A ): return candidate return None if __name__ == "__main__": print(f"""{solution() = }""")
90
1
'''simple docstring''' from __future__ import annotations def _snake_case ( A ) -> list[int]: lowerCAmelCase__ = 2 lowerCAmelCase__ = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(A ) if n > 1: factors.append(A ) return factors if __name__ == "__main__": import doctest doctest.testmod()
90
'''simple docstring''' # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. This contrasts with taking a full-size model, reducing its layers and # emb dimensions to the minimum, but keeping the full vocab + merges files, which leads to ~3MB in total for all files. # The latter is done by `fsmt-make-tiny-model.py`. # # It will then be used as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES __UpperCAmelCase = '''tiny-wmt19-en-ru''' # Build # borrowed from a test __UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] __UpperCAmelCase = dict(zip(vocab, range(len(vocab)))) __UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase = Path(tmpdirname) __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file'''] __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file'''] __UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file'''] with open(src_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, '''w''') as fp: fp.write('''\n'''.join(merges)) __UpperCAmelCase = FSMTTokenizer( langs=['''en''', '''ru'''], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) __UpperCAmelCase = FSMTConfig( langs=['''ru''', '''en'''], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) __UpperCAmelCase = FSMTForConditionalGeneration(config) print(f"""num of params {tiny_model.num_parameters()}""") # Test __UpperCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''') __UpperCAmelCase = tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"""Generated {mname_tiny}""") # Upload # transformers-cli upload tiny-wmt19-en-ru
90
1
'''simple docstring''' __UpperCAmelCase = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' __UpperCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] __UpperCAmelCase = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
90
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def _snake_case ( ) -> Union[str, Any]: raise RuntimeError('''CUDA out of memory.''' ) class a__ ( nn.Module ): '''simple docstring''' def __init__( self ) -> int: super().__init__() lowerCAmelCase__ = nn.Linear(3 , 4 ) lowerCAmelCase__ = nn.BatchNormad(4 ) lowerCAmelCase__ = nn.Linear(4 , 5 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_ ) ) ) class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(lowerCamelCase_ ): nonlocal batch_sizes batch_sizes.append(lowerCamelCase_ ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ ): nonlocal batch_sizes batch_sizes.append(lowerCamelCase_ ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga lowerCAmelCase__ , lowerCAmelCase__ = mock_training_loop_function('''hello''' ) self.assertListEqual(lowerCamelCase_ , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(lowerCamelCase_ ): pass with self.assertRaises(lowerCamelCase_ ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(lowerCamelCase_ ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(lowerCamelCase_ ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(lowerCamelCase_ ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(lowerCamelCase_ ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(lowerCamelCase_ ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = torch.cuda.memory_allocated() lowerCAmelCase__ = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_ ) lowerCAmelCase__ = release_memory(lowerCamelCase_ ) self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_ )
90
1
'''simple docstring''' from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class a__ ( a__ ): '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> Optional[int]: super().__init__( lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , ) lowerCAmelCase__ = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths} lowerCAmelCase__ = Text( cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , ) def __SCREAMING_SNAKE_CASE ( self ) -> int: # Build iterable dataset if self.streaming: lowerCAmelCase__ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None self.builder.download_and_prepare( download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , ) lowerCAmelCase__ = self.builder.as_dataset( split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory ) return dataset
90
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __UpperCAmelCase = logging.getLogger(__name__) def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]: lowerCAmelCase__ = bnb_quantization_config.load_in_abit lowerCAmelCase__ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) lowerCAmelCase__ = [] # custom device map if isinstance(A , A ) and len(device_map.keys() ) > 1: lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCAmelCase__ = get_keys_to_not_convert(A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A ) lowerCAmelCase__ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCAmelCase__ = [] lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A ) # compatibility with peft lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = get_parameter_device(A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A ) # convert param to the right dtype lowerCAmelCase__ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) lowerCAmelCase__ = getattr(A , A , A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A ): param.to(A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F"""The model device type is {model_device.type}. 
However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCAmelCase__ = replace_with_bnb_layers( A , A , modules_to_not_convert=A ) lowerCAmelCase__ = get_quantized_model_device_map( A , A , A , max_memory=A , no_split_module_classes=A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCAmelCase__ = True lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A , device_map=A , offload_dir=A ) def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]: if device_map is None: if torch.cuda.is_available(): lowerCAmelCase__ = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(A , A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) lowerCAmelCase__ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCAmelCase__ = {} lowerCAmelCase__ = special_dtypes lowerCAmelCase__ = no_split_module_classes lowerCAmelCase__ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCAmelCase__ = get_balanced_memory( A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , ) lowerCAmelCase__ = max_memory lowerCAmelCase__ = infer_auto_device_map(A , **A ) if isinstance(A , A ): # check if don't have any quantized module on the cpu lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCAmelCase__ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _snake_case ( A , A , A=None , A=None ) -> Any: if modules_to_not_convert is None: lowerCAmelCase__ = [] lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]: lowerCAmelCase__ = False for name, module in model.named_children(): if current_key_name is None: lowerCAmelCase__ = [] current_key_name.append(A ) if isinstance(A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCAmelCase__ = '''.'''.join(A ) lowerCAmelCase__ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCAmelCase__ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) lowerCAmelCase__ = module.weight.data if module.bias is not None: lowerCAmelCase__ = module.bias.data bnb_module.requires_grad_(A ) setattr(A , A , A ) lowerCAmelCase__ = True if len(list(module.children() ) ) > 0: lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) lowerCAmelCase__ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case ( A ) -> Tuple: # Create a copy of the model with init_empty_weights(): lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCAmelCase__ = find_tied_parameters(A ) # For compatibility with Accelerate < 0.18 if isinstance(A , A ): lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCAmelCase__ = sum(A , [] ) lowerCAmelCase__ = len(A ) > 0 # Check if it is a base model lowerCAmelCase__ = False if hasattr(A , '''base_model_prefix''' ): lowerCAmelCase__ = not hasattr(A , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCAmelCase__ = list(model.named_children() ) lowerCAmelCase__ = [list_modules[-1][0]] # add last module together with tied weights lowerCAmelCase__ = set(A ) - set(A ) lowerCAmelCase__ = list(set(A ) ) + list(A ) # remove ".weight" from the keys lowerCAmelCase__ = ['''.weight''', '''.bias'''] lowerCAmelCase__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCAmelCase__ = name.replace(A , '''''' ) filtered_module_names.append(A ) return filtered_module_names def _snake_case ( A ) -> Optional[int]: for m in model.modules(): if isinstance(A , bnb.nn.Linearabit ): return True return False def _snake_case ( A ) -> Union[str, Any]: return next(parameter.parameters() ).device def _snake_case ( A , A , A , A , A , A , A ) -> Any: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(A , A , 0 , dtype=A , value=A ) lowerCAmelCase__ = param_name lowerCAmelCase__ = model if "." in tensor_name: lowerCAmelCase__ = tensor_name.split('''.''' ) for split in splits[:-1]: lowerCAmelCase__ = getattr(A , A ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCAmelCase__ = new_module lowerCAmelCase__ = splits[-1] # offload weights lowerCAmelCase__ = False offload_weight(module._parameters[tensor_name] , A , A , index=A ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , ) else: offload_weight(A , A , A , index=A ) offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A ) set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
90
1
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __UpperCAmelCase = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: __UpperCAmelCase = json.load(f) @require_torch class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return FSMTTokenizer.from_pretrained(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['''en-ru''', 26.0], ['''ru-en''', 22.0], ['''en-de''', 22.0], ['''de-en''', 29.0], ] ) @slow def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple: # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowerCAmelCase__ = F"""facebook/wmt19-{pair}""" lowerCAmelCase__ = self.get_tokenizer(lowerCamelCase_ ) lowerCAmelCase__ = self.get_model(lowerCamelCase_ ) lowerCAmelCase__ = bleu_data[pair]['''src'''] lowerCAmelCase__ = bleu_data[pair]['''tgt'''] lowerCAmelCase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' , truncation=lowerCamelCase_ , padding='''longest''' ).to(lowerCamelCase_ ) lowerCAmelCase__ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCAmelCase__ = tokenizer.batch_decode( lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ ) lowerCAmelCase__ = calculate_bleu(lowerCamelCase_ , lowerCamelCase_ ) print(lowerCamelCase_ ) self.assertGreaterEqual(scores['''bleu'''] , lowerCamelCase_ )
90
'''simple docstring''' from collections.abc import Callable import numpy as np def _snake_case ( A , A , A , A , A ) -> np.array: lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) ) lowerCAmelCase__ = np.zeros((n + 1,) ) lowerCAmelCase__ = ya lowerCAmelCase__ = xa for k in range(A ): lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] ) lowerCAmelCase__ = y[k] + ( (step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
90
1
'''simple docstring''' from math import isqrt def _snake_case ( A ) -> bool: return all(number % divisor != 0 for divisor in range(2 , isqrt(A ) + 1 ) ) def _snake_case ( A = 10**6 ) -> int: lowerCAmelCase__ = 0 lowerCAmelCase__ = 1 lowerCAmelCase__ = 7 while prime_candidate < max_prime: primes_count += is_prime(A ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(f"""{solution() = }""")
90
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict: lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Any: return self.length def __getitem__( self , lowerCamelCase_ ) -> List[str]: return {"x": self.x[i], "y": self.y[i]} class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a__ ( torch.nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any: super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() ) lowerCAmelCase__ = True def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _snake_case ( A , A = 16 ) -> Any: from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} lowerCAmelCase__ = load_dataset('''csv''' , data_files=A ) lowerCAmelCase__ = datasets['''train'''].unique('''label''' ) lowerCAmelCase__ = {v: i for i, v in enumerate(A )} def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowerCAmelCase__ = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 ) return train_dataloader, eval_dataloader
90
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a__ ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = AltDiffusionPipeline lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS def __SCREAMING_SNAKE_CASE ( self ) -> str: torch.manual_seed(0 ) lowerCAmelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowerCAmelCase__ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) lowerCAmelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , ) lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ ) lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCAmelCase__ = 77 lowerCAmelCase__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]: if str(lowerCamelCase_ ).startswith('''mps''' ): lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ ) else: lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) lowerCAmelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = '''A photo of an astronaut''' lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # make sure here that pndm scheduler skips prk lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
90
'''simple docstring''' import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __UpperCAmelCase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def _snake_case ( A , A=None ) -> Optional[Any]: require_version(deps[pkg] , A )
90
1
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = KandinskyVaaPriorPipeline lowercase__ : List[str] = ["prompt"] lowercase__ : Union[str, Any] = ["prompt", "negative_prompt"] lowercase__ : Optional[int] = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] lowercase__ : str = False @property def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return 32 @property def __SCREAMING_SNAKE_CASE ( self ) -> str: return 32 @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return self.time_input_dim @property def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: return self.time_input_dim * 4 @property def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return 1_00 @property def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(lowerCamelCase_ ) @property def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase__ = { '''num_attention_heads''': 2, '''attention_head_dim''': 12, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } lowerCAmelCase__ = PriorTransformer(**lowerCamelCase_ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 lowerCAmelCase__ = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def __SCREAMING_SNAKE_CASE ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) lowerCAmelCase__ = CLIPVisionModelWithProjection(lowerCamelCase_ ) return model @property def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = CLIPImageProcessor( crop_size=2_24 , do_center_crop=lowerCamelCase_ , do_normalize=lowerCamelCase_ , do_resize=lowerCamelCase_ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_24 , ) return image_processor def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = self.dummy_prior lowerCAmelCase__ = self.dummy_image_encoder lowerCAmelCase__ = self.dummy_text_encoder lowerCAmelCase__ = self.dummy_tokenizer lowerCAmelCase__ = self.dummy_image_processor lowerCAmelCase__ = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=lowerCamelCase_ , clip_sample_range=10.0 , ) lowerCAmelCase__ = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> int: if str(lowerCamelCase_ ).startswith('''mps''' ): lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ ) else: lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) lowerCAmelCase__ = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = '''cpu''' lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = self.pipeline_class(**lowerCamelCase_ ) lowerCAmelCase__ = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) lowerCAmelCase__ = output.image_embeds lowerCAmelCase__ = pipe( **self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0] lowerCAmelCase__ = image[0, -10:] lowerCAmelCase__ = image_from_tuple[0, -10:] assert image.shape == (1, 32) lowerCAmelCase__ = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = torch_device == '''cpu''' lowerCAmelCase__ = True lowerCAmelCase__ = False self._test_inference_batch_single_identical( test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , test_mean_pixel_difference=lowerCamelCase_ , ) @skip_mps def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = torch_device == '''cpu''' lowerCAmelCase__ = False self._test_attention_slicing_forward_pass( test_max_difference=lowerCamelCase_ , test_mean_pixel_difference=lowerCamelCase_ , )
90
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]: lowerCAmelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), ('''vqa_classifier.0.bias''', 
'''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def _snake_case ( A , A ) -> List[str]: for i in range(config.num_hidden_layers ): lowerCAmelCase__ = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" ) lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase__ = in_proj_bias[: config.hidden_size] lowerCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ = in_proj_bias[-config.hidden_size :] def _snake_case ( A ) -> List[str]: lowerCAmelCase__ = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(A , A ) def _snake_case ( A , A , A ) -> str: lowerCAmelCase__ = dct.pop(A ) lowerCAmelCase__ = val @torch.no_grad() def _snake_case ( A , A ) -> Any: lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False if "vqa" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 3129 lowerCAmelCase__ = '''huggingface/label-files''' lowerCAmelCase__ = '''vqa2-id2label.json''' lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} lowerCAmelCase__ = ViltForQuestionAnswering(A ) elif "nlvr" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 2 lowerCAmelCase__ = {0: '''False''', 1: '''True'''} lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()} lowerCAmelCase__ = 3 lowerCAmelCase__ = ViltForImagesAndTextClassification(A ) elif "irtr" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = ViltForImageAndTextRetrieval(A ) elif "mlm_itm" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = ViltForMaskedLM(A ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict'''] lowerCAmelCase__ = create_rename_keys(A , A , A , A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_q_k_v(A , A ) if mlm_model or irtr_model: lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] for k in ignore_keys: state_dict.pop(A , A ) # load state dict into HuggingFace model model.eval() if mlm_model: lowerCAmelCase__ , lowerCAmelCase__ = 
model.load_state_dict(A , strict=A ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(A ) # Define processor lowerCAmelCase__ = ViltImageProcessor(size=384 ) lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' ) lowerCAmelCase__ = ViltProcessor(A , A ) # Forward pass on example inputs (image + text) if nlvr_model: lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw ) lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw ) lowerCAmelCase__ = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw ) if mlm_model: lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].''' else: lowerCAmelCase__ = '''How many cats are there?''' lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' ) lowerCAmelCase__ = model(**A ) # Verify outputs if mlm_model: lowerCAmelCase__ = torch.Size([1, 11, 30522] ) lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 ) # verify masked token prediction equals "cats" lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: lowerCAmelCase__ = torch.Size([1, 3129] ) lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 ) # verify vqa prediction equals "2" lowerCAmelCase__ = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: lowerCAmelCase__ = torch.Size([1, 2] ) lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(A ).mkdir(exist_ok=A ) print(F"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) processor.save_pretrained(A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __UpperCAmelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
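The core move in the ViLT conversion cell above is read_in_q_k_v: timm stores attention as one fused qkv projection, and the script slices it into separate query/key/value tensors. A minimal sketch of that slicing on a dummy tensor (the hidden size here is illustrative only; ViLT-base uses 768):

import torch

hidden_size = 8  # illustrative; not the real checkpoint size
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused rows: [q; k; v]
qkv_bias = torch.randn(3 * hidden_size)

# Same slices as read_in_q_k_v above.
q_w = qkv_weight[:hidden_size, :]
k_w = qkv_weight[hidden_size : hidden_size * 2, :]
v_w = qkv_weight[-hidden_size:, :]
q_b = qkv_bias[:hidden_size]
k_b = qkv_bias[hidden_size : hidden_size * 2]
v_b = qkv_bias[-hidden_size:]

# Concatenating the pieces reconstructs the fused projection exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), qkv_bias)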
90
1
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=3 , lowerCamelCase_=32 , lowerCamelCase_=3 , lowerCamelCase_=10 , lowerCamelCase_=[10, 20, 30, 40] , lowerCamelCase_=[1, 1, 2, 1] , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_="relu" , lowerCamelCase_=3 , lowerCamelCase_=None , ) -> Any: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = embeddings_size lowerCAmelCase__ = hidden_sizes lowerCAmelCase__ = depths lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_act lowerCAmelCase__ = num_labels lowerCAmelCase__ = scope lowerCAmelCase__ = len(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = self.get_config() return config, pixel_values def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]: lowerCAmelCase__ = FlaxRegNetModel(config=lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Any: lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = FlaxRegNetForImageClassification(config=lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Any = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase__ : List[Any] = False lowercase__ : Union[str, Any] = False lowercase__ : Optional[int] = False def __SCREAMING_SNAKE_CASE ( self ) -> None: lowerCAmelCase__ = FlaxRegNetModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: pass def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(lowerCamelCase_ ) lowerCAmelCase__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] lowerCAmelCase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: def check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = model_class(lowerCamelCase_ ) lowerCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase__ = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = model_class(lowerCamelCase_ ) @jax.jit def model_jitted(lowerCamelCase_ , **lowerCamelCase_ ): return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase__ = model_jitted(**lowerCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase__ = model_jitted(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( ) -> Union[str, Any]: lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class a__ ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self ) 
-> Optional[int]: return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''np''' ) lowerCAmelCase__ = model(**lowerCamelCase_ ) # verify the logits lowerCAmelCase__ = (1, 10_00) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) lowerCAmelCase__ = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
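The JIT test in the RegNet suite above compares a compiled forward pass against eager execution via jax.disable_jit(). The same pattern in isolation, on a toy function standing in for the model:

import jax
import jax.numpy as jnp


@jax.jit
def forward(x):
    return jnp.tanh(x) * 2.0  # stand-in for a model's __call__


x = jnp.arange(4.0)
jitted_out = forward(x)
with jax.disable_jit():  # run the same trace eagerly, op by op
    eager_out = forward(x)

assert jitted_out.shape == eager_out.shape
assert jnp.allclose(jitted_out, eager_out)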
90
import re


def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
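A few illustrative calls against the pattern above (the sample numbers are made up):

for candidate in ("+918827897895", "+91 8827897895", "9876543210", "12345"):
    print(candidate, indian_phone_validator(candidate))
# The first three print True; "12345" prints False.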
90
1
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
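A quick sanity check: this is Project Euler problem 5, and the smallest number evenly divisible by all of 1..10 is known to be 2520, which the folded-LCM approach above reproduces:

assert greatest_common_divisor(48, 18) == 6
assert lcm(4, 6) == 12
assert solution(10) == 2520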
90
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''', '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''', }, } __UpperCAmelCase = { '''facebook/esm2_t6_8M_UR50D''': 1_024, '''facebook/esm2_t12_35M_UR50D''': 1_024, } def _snake_case ( A ) -> Optional[Any]: with open(A , '''r''' ) as f: lowerCAmelCase__ = f.read().splitlines() return [l.strip() for l in lines] class a__ ( a__ ): '''simple docstring''' lowercase__ : Optional[Any] = VOCAB_FILES_NAMES lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple: super().__init__(**lowerCamelCase_ ) lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ ) lowerCAmelCase__ = dict(enumerate(self.all_tokens ) ) lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )} lowerCAmelCase__ = unk_token lowerCAmelCase__ = cls_token lowerCAmelCase__ = pad_token lowerCAmelCase__ = mask_token lowerCAmelCase__ = eos_token lowerCAmelCase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]: return text.split() def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict: return len(self._id_to_token ) def __SCREAMING_SNAKE_CASE ( self ) -> int: return {token: i for i, token in enumerate(self.all_tokens )} def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]: lowerCAmelCase__ = [self.cls_token_id] lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 
for token in token_ids_a] lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1] if token_ids_a is not None: mask += [0] * len(lowerCamelCase_ ) + [1] return mask def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]: lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(lowerCamelCase_ , '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return self.get_vocab_size(with_added_tokens=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int: return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ )
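build_inputs_with_special_tokens and get_special_tokens_mask in the ESM tokenizer cell above are plain list concatenation; a toy trace with made-up ids (cls=0, eos=2), since ESM has no separate SEP token:

cls, eos = [0], [2]  # hypothetical ids; EOS doubles as the separator
seq_a, seq_b = [5, 6, 7], [8, 9]

single = cls + seq_a + eos              # [0, 5, 6, 7, 2]
pair = cls + seq_a + eos + seq_b + eos  # [0, 5, 6, 7, 2, 8, 9, 2]

# get_special_tokens_mask flags the added tokens with 1:
mask = [1] + [0] * len(seq_a) + [1]     # [1, 0, 0, 0, 1]
assert len(mask) == len(single)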
90
1
'''simple docstring''' import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class a__ ( a__ ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def __SCREAMING_SNAKE_CASE ( self ) -> str: with self.assertRaises(lowerCamelCase_ ): lowerCAmelCase__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: with self.assertRaises(lowerCamelCase_ ): lowerCAmelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): lowerCAmelCase__ = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) ) self.assertEqual(arr.type , pa.string() ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''' ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> str: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): lowerCAmelCase__ = pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''' ) ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''' ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''' ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def __SCREAMING_SNAKE_CASE ( self ) -> Dict: import PIL.Image lowerCAmelCase__ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( '''datasets.arrow_writer.cast_to_python_objects''' , side_effect=lowerCamelCase_ ) as mock_cast_to_python_objects: lowerCAmelCase__ = pa.array(TypedSequence([{'''path''': None, '''bytes''': b'''image_bytes'''}, pil_image] , type=Image() ) ) lowerCAmelCase__ , lowerCAmelCase__ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn('''optimize_list_casting''' , lowerCamelCase_ ) self.assertFalse(kwargs['''optimize_list_casting'''] ) def _snake_case ( A , A ) -> str: lowerCAmelCase__ = pa.BufferReader(A ) if isinstance(A , pa.Buffer ) else pa.memory_map(A ) lowerCAmelCase__ = pa.ipc.open_stream(A ) lowerCAmelCase__ = f.read_all() 
assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] ) @pytest.mark.parametrize( '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] ) def _snake_case ( A , A ) -> Tuple: lowerCAmelCase__ = pa.BufferOutputStream() lowerCAmelCase__ = pa.schema(A ) if fields else None with ArrowWriter(stream=A , schema=A , writer_batch_size=A ) as writer: writer.write({'''col_1''': '''foo''', '''col_2''': 1} ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase__ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} assert writer._schema == pa.schema(A , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def _snake_case ( ) -> Dict: lowerCAmelCase__ = pa.BufferOutputStream() lowerCAmelCase__ = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} ) with ArrowWriter(stream=A , features=A ) as writer: writer.write({'''labels''': 0} ) writer.write({'''labels''': 1} ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata lowerCAmelCase__ = pa.BufferReader(output.getvalue() ) lowerCAmelCase__ = pa.ipc.open_stream(A ) lowerCAmelCase__ = f.read_all() lowerCAmelCase__ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(A ) @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] ) def _snake_case ( A ) -> Optional[int]: lowerCAmelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=A , writer_batch_size=A , hash_salt='''split_name''' , check_duplicates=A , ) as writer: with pytest.raises(A ): writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() @pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] ) def _snake_case ( A ) -> int: lowerCAmelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=A , writer_batch_size=A , hash_salt='''split_name''' , check_duplicates=A , ) as writer: with pytest.raises(A ): writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10 ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10 ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() @pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] ) def _snake_case ( A ) -> int: lowerCAmelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=A , writer_batch_size=A , hash_salt='''split_name''' , check_duplicates=A , ) as writer: writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1 ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] ) @pytest.mark.parametrize( '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': 
pa.intaa()}] ) def _snake_case ( A , A ) -> Dict: lowerCAmelCase__ = pa.BufferOutputStream() lowerCAmelCase__ = pa.schema(A ) if fields else None with ArrowWriter(stream=A , schema=A , writer_batch_size=A ) as writer: writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) writer.write_batch({'''col_1''': [], '''col_2''': []} ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase__ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} assert writer._schema == pa.schema(A , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] ) @pytest.mark.parametrize( '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] ) def _snake_case ( A , A ) -> int: lowerCAmelCase__ = pa.BufferOutputStream() lowerCAmelCase__ = pa.schema(A ) if fields else None with ArrowWriter(stream=A , schema=A , writer_batch_size=A ) as writer: writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase__ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} assert writer._schema == pa.schema(A , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] ) @pytest.mark.parametrize( '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] ) def _snake_case ( A , A ) -> Any: lowerCAmelCase__ = pa.BufferOutputStream() lowerCAmelCase__ = pa.schema(A ) if fields else None with ArrowWriter(stream=A , schema=A , writer_batch_size=A ) as writer: writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) ) writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase__ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} assert writer._schema == pa.schema(A , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def _snake_case ( ) -> Dict: with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} lowerCAmelCase__ = os.path.join(A , '''test.arrow''' ) with ArrowWriter(path=A , schema=pa.schema(A ) ) as writer: writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(A , metadata=writer._schema.metadata ) _check_output(A , 1 ) def _snake_case ( A ) -> str: if pa.types.is_list(A ): return get_base_dtype(arr_type.value_type ) else: return arr_type def _snake_case ( A , A ) -> Tuple: if isinstance(lst[0] , A ): change_first_primitive_element_in_list(lst[0] , A ) else: lowerCAmelCase__ = value @pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] ) @pytest.mark.parametrize('''sequence''' , [[1, 
2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def _snake_case ( A , A , A ) -> Optional[int]: lowerCAmelCase__ = pa.array(TypedSequence(A , optimized_int_type=A ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( '''col, expected_dtype''' , [ ('''attention_mask''', pa.inta()), ('''special_tokens_mask''', pa.inta()), ('''token_type_ids''', pa.inta()), ('''input_ids''', pa.intaa()), ('''other''', pa.intaa()), ] , ) @pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def _snake_case ( A , A , A ) -> Dict: # in range lowerCAmelCase__ = pa.array(OptimizedTypedSequence(A , col=A ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications lowerCAmelCase__ = copy.deepcopy(A ) lowerCAmelCase__ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(A , A ) lowerCAmelCase__ = pa.array(OptimizedTypedSequence(A , col=A ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize('''raise_exception''' , [False, True] ) def _snake_case ( A , A ) -> List[str]: lowerCAmelCase__ = str(tmp_path / '''dataset-train.arrow''' ) try: with ArrowWriter(path=A ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def _snake_case ( A ) -> Optional[int]: lowerCAmelCase__ = '''mock://dataset-train.arrow''' with ArrowWriter(path=A , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(A ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({'''col_1''': '''foo''', '''col_2''': 1} ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(A ) def _snake_case ( ) -> Union[str, Any]: lowerCAmelCase__ = pa.BufferOutputStream() with ParquetWriter(stream=A ) as writer: writer.write({'''col_1''': '''foo''', '''col_2''': 1} ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} ) lowerCAmelCase__ , lowerCAmelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 lowerCAmelCase__ = pa.BufferReader(output.getvalue() ) lowerCAmelCase__ = pq.read_table(A ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize('''embed_local_files''' , [False, True] ) def _snake_case ( A , A ) -> Optional[int]: import PIL.Image lowerCAmelCase__ = str(tmp_path / '''test_image_rgb.jpg''' ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(A , format='''png''' ) lowerCAmelCase__ = pa.BufferOutputStream() with ParquetWriter( stream=A , features=Features({'''image''': Image()} ) , embed_local_files=A ) as writer: writer.write({'''image''': image_path} ) writer.finalize() lowerCAmelCase__ = pa.BufferReader(output.getvalue() ) lowerCAmelCase__ = pq.read_table(A ) lowerCAmelCase__ = pa_table.to_pydict() if embed_local_files: assert isinstance(out['''image'''][0]['''path'''] , A ) with open(A , '''rb''' ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def _snake_case ( ) -> Optional[Any]: lowerCAmelCase__ = pa.schema([pa.field('''col_1''' , pa.string() , nullable=A )] ) lowerCAmelCase__ = pa.BufferOutputStream() with ArrowWriter(stream=A ) as writer: writer._build_writer(inferred_schema=A ) assert writer._schema == 
pa.schema([pa.field('''col_1''' , pa.string() )] )
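Every test in the cell above follows the same write/finalize/read-back cycle; stripped to its essentials, and mirroring only calls the tests themselves make against datasets.arrow_writer:

import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
assert num_examples == 2 and num_bytes > 0
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}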
90
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a__ ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = AltDiffusionPipeline lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS def __SCREAMING_SNAKE_CASE ( self ) -> str: torch.manual_seed(0 ) lowerCAmelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowerCAmelCase__ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) lowerCAmelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , ) lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ ) lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCAmelCase__ = 77 lowerCAmelCase__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]: if str(lowerCamelCase_ ).startswith('''mps''' ): lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ ) else: lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) lowerCAmelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: 
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = '''A photo of an astronaut''' lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) torch.manual_seed(0 ) lowerCAmelCase__ = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ ) lowerCAmelCase__ = text_encoder lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # make sure here that pndm scheduler skips prk lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 
5_12, 3) lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __SCREAMING_SNAKE_CASE ( self ) -> Any: lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ ) lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
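get_dummy_inputs in the AltDiffusion tests above guards on the device string because torch.Generator cannot be constructed for mps; pulled out on its own, the guard looks like this (a sketch, assuming a recent PyTorch):

import torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        # mps has no device-local generator; seed and reuse the global one
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


noise = torch.randn(2, 2, generator=make_generator("cpu"))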
90
1
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
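Because the hypothesis above is linear, the fixed point of the update theta_i <- theta_i - alpha * dJ/dtheta_i can be cross-checked against an ordinary least-squares fit; a sketch using numpy on the same train_data:

import numpy as np

# Design matrix with a bias column, matching _hypothesis_value:
# hypothesis(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3
X = np.array([[1, *features] for features, _ in train_data])
y = np.array([target for _, target in train_data])

theta_ls, *_ = np.linalg.lstsq(X, y, rcond=None)
print(theta_ls)  # gradient descent should converge toward this vector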
90
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
90
1
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class a__ ( a__ ): '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0 ) -> Optional[int]: lowerCAmelCase__ = 1.0 if scale is None else scale lowerCAmelCase__ = 0.0 if loc is None else loc super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_ )] ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return self.base_dist.mean * self.scale + self.loc @property def __SCREAMING_SNAKE_CASE ( self ) -> Dict: return self.base_dist.variance * self.scale**2 @property def __SCREAMING_SNAKE_CASE ( self ) -> str: return self.variance.sqrt() class a__ ( nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) -> None: super().__init__(**lowerCamelCase_ ) lowerCAmelCase__ = args_dim lowerCAmelCase__ = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_ ) for dim in args_dim.values()] ) lowerCAmelCase__ = domain_map def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple[torch.Tensor]: lowerCAmelCase__ = [proj(lowerCamelCase_ ) for proj in self.proj] return self.domain_map(*lowerCamelCase_ ) class a__ ( nn.Module ): '''simple docstring''' def __init__( self , lowerCamelCase_ ) -> Any: super().__init__() lowerCAmelCase__ = function def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , *lowerCamelCase_ ) -> int: return self.function(lowerCamelCase_ , *lowerCamelCase_ ) class a__ : '''simple docstring''' lowercase__ : type lowercase__ : int lowercase__ : Dict[str, int] def __init__( self , lowerCamelCase_ = 1 ) -> None: lowerCAmelCase__ = dim lowerCAmelCase__ = {k: dim * self.args_dim[k] for k in self.args_dim} def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]: if self.dim == 1: return self.distribution_class(*lowerCamelCase_ ) else: return Independent(self.distribution_class(*lowerCamelCase_ ) , 1 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution: lowerCAmelCase__ = self._base_distribution(lowerCamelCase_ ) if loc is None and scale is None: return distr else: return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim ) @property def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: return () if self.dim == 1 else (self.dim,) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return len(self.event_shape ) @property def __SCREAMING_SNAKE_CASE ( self ) -> float: return 0.0 def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> nn.Module: return ParameterProjection( in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __SCREAMING_SNAKE_CASE ( self , *lowerCamelCase_ ) -> List[Any]: raise NotImplementedError() @staticmethod def __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ) -> torch.Tensor: return (x + torch.sqrt(torch.square(lowerCamelCase_ ) + 4.0 )) / 2.0 class a__ ( a__ ): '''simple docstring''' lowercase__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} lowercase__ : type = StudentT @classmethod def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int: lowerCAmelCase__ = cls.squareplus(lowerCamelCase_ 
).clamp_min(torch.finfo(scale.dtype ).eps ) lowerCAmelCase__ = 2.0 + cls.squareplus(lowerCamelCase_ ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( a__ ): '''simple docstring''' lowercase__ : Dict[str, int] = {"loc": 1, "scale": 1} lowercase__ : type = Normal @classmethod def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , lowerCamelCase_ ) -> Dict: lowerCAmelCase__ = cls.squareplus(lowerCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( a__ ): '''simple docstring''' lowercase__ : Dict[str, int] = {"total_count": 1, "logits": 1} lowercase__ : type = NegativeBinomial @classmethod def __SCREAMING_SNAKE_CASE ( cls , lowerCamelCase_ , lowerCamelCase_ ) -> int: lowerCAmelCase__ = cls.squareplus(lowerCamelCase_ ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Distribution: lowerCAmelCase__ , lowerCAmelCase__ = distr_args if self.dim == 1: return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_ ) else: return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_ ) , 1 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None ) -> Distribution: lowerCAmelCase__ , lowerCAmelCase__ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
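AffineTransformed in the cell above is a thin wrapper over torch.distributions; its mean/variance/stddev properties can be sanity-checked directly against the base distribution with toy numbers:

import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

base = Normal(torch.tensor(0.0), torch.tensor(1.0))
loc, scale = 3.0, 2.0
affine = TransformedDistribution(base, [AffineTransform(loc=loc, scale=scale)])

samples = affine.sample((10_000,))  # draws are shifted and scaled accordingly
# mean = base.mean * scale + loc and variance = base.variance * scale**2,
# exactly the properties AffineTransformed defines.
assert torch.isclose(base.mean * scale + loc, torch.tensor(3.0))
assert torch.isclose(base.variance * scale**2, torch.tensor(4.0))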
90
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu

DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    # Strips the leading "<command> [<args>]" from sub-command usage lines.
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
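For orientation, the converters above map the 0-based index returned by BulletMenu to the matching enum; illustrative assertions below (hedged: the enum member names are assumed from typical accelerate usage, and the relative imports mean this only runs inside the package):

assert _convert_yes_no_to_bool("Yes") is True
assert _convert_compute_environment("0") == ComputeEnvironment.LOCAL_MACHINE
assert _convert_mixed_precision("1") == PrecisionType.FP16  # index 1 -> "fp16"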
90
1
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechTaFeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch __UpperCAmelCase = random.Random() def _snake_case ( A , A=1.0 , A=None , A=None ) -> Optional[Any]: if rng is None: lowerCAmelCase__ = global_rng lowerCAmelCase__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch class a__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=7 , lowerCamelCase_=4_00 , lowerCamelCase_=20_00 , lowerCamelCase_=1 , lowerCamelCase_=0.0 , lowerCamelCase_=1_60_00 , lowerCamelCase_=True , lowerCamelCase_=80 , lowerCamelCase_=16 , lowerCamelCase_=64 , lowerCamelCase_="hann_window" , lowerCamelCase_=80 , lowerCamelCase_=76_00 , lowerCamelCase_=1e-10 , lowerCamelCase_=True , ) -> List[Any]: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = min_seq_length lowerCAmelCase__ = max_seq_length lowerCAmelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCAmelCase__ = feature_size lowerCAmelCase__ = padding_value lowerCAmelCase__ = sampling_rate lowerCAmelCase__ = do_normalize lowerCAmelCase__ = num_mel_bins lowerCAmelCase__ = hop_length lowerCAmelCase__ = win_length lowerCAmelCase__ = win_function lowerCAmelCase__ = fmin lowerCAmelCase__ = fmax lowerCAmelCase__ = mel_floor lowerCAmelCase__ = return_attention_mask def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False , lowerCamelCase_=False ) -> str: def _flatten(lowerCamelCase_ ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: lowerCAmelCase__ = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size lowerCAmelCase__ = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False , lowerCamelCase_=False ) -> Union[str, Any]: if equal_length: lowerCAmelCase__ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCAmelCase__ = [ floats_list((x, self.num_mel_bins) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : str = SpeechTaFeatureExtractor def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = SpeechTaFeatureExtractionTester(self ) def 
__SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple: self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1e-3 ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: # Tests that all call wrap to encode_plus and batch_encode_plus lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input lowerCAmelCase__ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values lowerCAmelCase__ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # Test batched lowerCAmelCase__ = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values lowerCAmelCase__ = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad'''] lowerCAmelCase__ = [None, 16_00, None] for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors='''np''' ) lowerCAmelCase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00] ) self.assertTrue(input_values[0][8_00:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:10_00] ) self.assertTrue(input_values[0][10_00:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:12_00] ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ = range(8_00 , 14_00 , 2_00 ) lowerCAmelCase__ = [floats_list((1, x) )[0] for x in lengths] lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad'''] lowerCAmelCase__ = [None, 16_00, None] for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = feat_extract(lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ ) lowerCAmelCase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:8_00] ) self._check_zero_mean_unit_variance(input_values[1][:10_00] ) self._check_zero_mean_unit_variance(input_values[2][:12_00] ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase__ = feat_extract( lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10_00 , padding='''max_length''' , return_tensors='''np''' ) lowerCAmelCase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def __SCREAMING_SNAKE_CASE ( 
self ) -> str: lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase__ = feat_extract( lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10_00 , padding='''longest''' , return_tensors='''np''' ) lowerCAmelCase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1, :10_00] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 10_00) ) lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase__ = feat_extract( lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=20_00 , padding='''longest''' , return_tensors='''np''' ) lowerCAmelCase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :8_00] ) self._check_zero_mean_unit_variance(input_values[1, :10_00] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 12_00) ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase__ = np.random.rand(1_00 ).astype(np.floataa ) lowerCAmelCase__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowerCAmelCase__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) lowerCAmelCase__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __SCREAMING_SNAKE_CASE ( self ) -> int: # Tests that all call wrap to encode_plus and batch_encode_plus lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test feature size lowerCAmelCase__ = feature_extractor(audio_target=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_values self.assertTrue(input_values.ndim == 3 ) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins ) # Test not batched input lowerCAmelCase__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values lowerCAmelCase__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # Test batched lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_values lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] lowerCAmelCase__ = np.asarray(lowerCamelCase_ ) lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_values lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target() lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase__ = feat_extract.model_input_names[0] lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(lowerCamelCase_ ) == len(lowerCamelCase_ ) for x, y in zip(lowerCamelCase_ , processed_features[input_name] ) ) ) lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase_ ) lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) lowerCAmelCase__ = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCAmelCase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase_ ) lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase__ = feat_extract.model_input_names[0] lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) lowerCAmelCase__ = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCAmelCase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) ) @require_torch def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target() lowerCAmelCase__ = feat_extract.model_input_names[0] lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase__ = feat_extract.num_mel_bins # hack! lowerCAmelCase__ = feat_extract.pad(lowerCamelCase_ , padding='''longest''' , return_tensors='''np''' )[input_name] lowerCAmelCase__ = feat_extract.pad(lowerCamelCase_ , padding='''longest''' , return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: lowerCAmelCase__ = self.feat_extract_dict lowerCAmelCase__ = True lowerCAmelCase__ = self.feature_extraction_class(**lowerCamelCase_ ) lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target() lowerCAmelCase__ = [len(lowerCamelCase_ ) for x in speech_inputs] lowerCAmelCase__ = feat_extract.model_input_names[0] lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase__ = feat_extract.num_mel_bins # hack! 
lowerCAmelCase__ = feat_extract.pad(lowerCamelCase_ , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , lowerCamelCase_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = self.feat_extract_dict lowerCAmelCase__ = True lowerCAmelCase__ = self.feature_extraction_class(**lowerCamelCase_ ) lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_target() lowerCAmelCase__ = [len(lowerCamelCase_ ) for x in speech_inputs] lowerCAmelCase__ = feat_extract.model_input_names[0] lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase__ = min(lowerCamelCase_ ) lowerCAmelCase__ = feat_extract.num_mel_bins # hack! lowerCAmelCase__ = feat_extract.pad( lowerCamelCase_ , padding='''max_length''' , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''np''' ) self.assertIn('''attention_mask''' , lowerCamelCase_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict: from datasets import load_dataset lowerCAmelCase__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech lowerCAmelCase__ = ds.sort('''id''' ).select(range(lowerCamelCase_ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: # fmt: off lowerCAmelCase__ = torch.tensor( [2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03, 3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03, 2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04, 4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03, 7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04, 4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] ) # fmt: on lowerCAmelCase__ = self._load_datasamples(1 ) lowerCAmelCase__ = SpeechTaFeatureExtractor() lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 9_36_80) ) self.assertTrue(torch.allclose(input_values[0, :30] , lowerCamelCase_ , atol=1e-6 ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: # fmt: off lowerCAmelCase__ = torch.tensor( [-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777, -3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386, -3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571, -3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] ) # fmt: on lowerCAmelCase__ = self._load_datasamples(1 ) lowerCAmelCase__ = SpeechTaFeatureExtractor() lowerCAmelCase__ = feature_extractor(audio_target=lowerCamelCase_ , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 3_66, 80) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase_ , atol=1e-4 ) )
90
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class a__ ( a__ ): '''simple docstring''' lowercase__ : torch.FloatTensor class a__ ( a__ , a__ ): '''simple docstring''' @register_to_config def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]: super().__init__() # pass init params to Encoder lowerCAmelCase__ = Encoder( in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , ) lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 ) lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ ) lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 ) # pass init params to Decoder lowerCAmelCase__ = Decoder( in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , ) @apply_forward_hook def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput: lowerCAmelCase__ = self.encoder(lowerCamelCase_ ) lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=lowerCamelCase_ ) @apply_forward_hook def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ ) else: lowerCAmelCase__ = h lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ ) lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: lowerCAmelCase__ = sample lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCamelCase_ )
90
1
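The feature-extractor test row above leans on a `_check_zero_mean_unit_variance` helper to assert that normalized features have roughly zero mean and unit variance along each dimension. As a standalone illustration of what that assertion verifies (not part of the dataset row; this sketch compares the absolute mean, which is slightly stricter than the row's bare `< 1e-3` check), a minimal NumPy version:

import numpy as np


def check_zero_mean_unit_variance(values: np.ndarray, atol: float = 1e-3) -> None:
    # per-dimension mean should be ~0 and variance ~1 after normalization
    assert np.all(np.abs(np.mean(values, axis=0)) < atol)
    assert np.all(np.abs(np.var(values, axis=0) - 1) < atol)


x = np.random.randn(10_000, 4)
x = (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 1e-7)
check_zero_mean_unit_variance(x)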
'''simple docstring'''
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
90
'''simple docstring'''
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    # n must not already appear in the row, the column or the 3x3 box
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
90
1
'''simple docstring'''
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
90
'''simple docstring'''


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
90
1
'''simple docstring''' import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( ) -> Union[str, Any]: # Get the sagemaker specific mp parameters from smp_options variable. lowerCAmelCase__ = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. lowerCAmelCase__ = json.loads(A ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. lowerCAmelCase__ = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". lowerCAmelCase__ = json.loads(A ) if not mpi_options.get('''sagemaker_mpi_enabled''' , A ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class a__ ( a__ ): '''simple docstring''' lowercase__ : str = field( default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''' , lowerCamelCase_ , ) @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> "torch.device": logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: lowerCAmelCase__ = torch.device('''cpu''' ) lowerCAmelCase__ = 0 elif is_sagemaker_model_parallel_available(): lowerCAmelCase__ = smp.local_rank() lowerCAmelCase__ = torch.device('''cuda''' , lowerCamelCase_ ) lowerCAmelCase__ = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta ) lowerCAmelCase__ = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) lowerCAmelCase__ = torch.device('''cuda''' , self.local_rank ) lowerCAmelCase__ = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 lowerCAmelCase__ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. lowerCAmelCase__ = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta ) lowerCAmelCase__ = torch.device('''cuda''' , self.local_rank ) lowerCAmelCase__ = 1 if device.type == "cuda": torch.cuda.set_device(lowerCamelCase_ ) return device @property def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: return not is_sagemaker_model_parallel_available() @property def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return False
90
'''simple docstring'''
from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
90
1
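The SageMaker training-arguments row above resolves the compute device through a cascade (CPU, model-parallel local rank, data-parallel local rank, then a plain CUDA fallback). A minimal framework-free sketch of just the fallback branches, assuming only that PyTorch is installed (the helper name is illustrative, not from the source):

import torch


def pick_device(no_cuda: bool = False, local_rank: int = -1) -> torch.device:
    if no_cuda or not torch.cuda.is_available():
        return torch.device("cpu")
    if local_rank == -1:
        # single-process case: default to the first visible GPU
        return torch.device("cuda:0")
    # distributed case: bind this process to its local GPU
    torch.cuda.set_device(local_rank)
    return torch.device("cuda", local_rank)


print(pick_device())  # e.g. device(type='cpu') on a CPU-only machine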
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __UpperCAmelCase = [ '''python''', '''tqdm''', '''regex''', '''requests''', '''packaging''', '''filelock''', '''numpy''', '''tokenizers''', '''huggingface-hub''', '''safetensors''', '''accelerate''', '''pyyaml''', ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def _snake_case ( A , A=None ) -> Optional[int]: require_version(deps[pkg] , A )
90
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
90
1
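The dependency-check module in the row above ultimately delegates each "name plus version constraint" string to a `require_version` helper. A rough standalone equivalent, assuming the `packaging` library and Python 3.8+ `importlib.metadata` (the function name is mine, not from the source):

from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement


def check_requirement(spec: str) -> None:
    # spec looks like "tqdm>=4.27": parse out the name and the version constraint
    req = Requirement(spec)
    try:
        installed = version(req.name)
    except PackageNotFoundError as err:
        raise ImportError(f"{req.name} is not installed") from err
    if not req.specifier.contains(installed, prereleases=True):
        raise ImportError(f"{req.name}=={installed} does not satisfy {spec}")


check_requirement("packaging>=20.0")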
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''} __UpperCAmelCase = { '''vocab_file''': { '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''', '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''', }, } __UpperCAmelCase = { '''facebook/esm2_t6_8M_UR50D''': 1_024, '''facebook/esm2_t12_35M_UR50D''': 1_024, } def _snake_case ( A ) -> Optional[Any]: with open(A , '''r''' ) as f: lowerCAmelCase__ = f.read().splitlines() return [l.strip() for l in lines] class a__ ( a__ ): '''simple docstring''' lowercase__ : Optional[Any] = VOCAB_FILES_NAMES lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple: super().__init__(**lowerCamelCase_ ) lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ ) lowerCAmelCase__ = dict(enumerate(self.all_tokens ) ) lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )} lowerCAmelCase__ = unk_token lowerCAmelCase__ = cls_token lowerCAmelCase__ = pad_token lowerCAmelCase__ = mask_token lowerCAmelCase__ = eos_token lowerCAmelCase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]: return text.split() def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict: return len(self._id_to_token ) def __SCREAMING_SNAKE_CASE ( self ) -> int: return {token: i for i, token in enumerate(self.all_tokens )} def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: return self._id_to_token.get(lowerCamelCase_ , self.unk_token ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]: lowerCAmelCase__ = [self.cls_token_id] lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 
for token in token_ids_a] lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1] if token_ids_a is not None: mask += [0] * len(lowerCamelCase_ ) + [1] return mask def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]: lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(lowerCamelCase_ , '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return self.get_vocab_size(with_added_tokens=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int: return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ )
90
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __UpperCAmelCase = TypeVar('''KEY''') __UpperCAmelCase = TypeVar('''VAL''') @dataclass(frozen=a__ , slots=a__ ) class a__ ( Generic[KEY, VAL] ): '''simple docstring''' lowercase__ : KEY lowercase__ : VAL class a__ ( _Item ): '''simple docstring''' def __init__( self ) -> None: super().__init__(lowerCamelCase_ , lowerCamelCase_ ) def __bool__( self ) -> bool: return False __UpperCAmelCase = _DeletedItem() class a__ ( MutableMapping[KEY, VAL] ): '''simple docstring''' def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None: lowerCAmelCase__ = initial_block_size lowerCAmelCase__ = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ = capacity_factor lowerCAmelCase__ = 0 def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return hash(lowerCamelCase_ ) % len(self._buckets ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int: return (ind + 1) % len(self._buckets ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool: lowerCAmelCase__ = self._buckets[ind] if not stored: lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ ) return True else: return False def __SCREAMING_SNAKE_CASE ( self ) -> bool: lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None: lowerCAmelCase__ = self._buckets lowerCAmelCase__ = [None] * new_size lowerCAmelCase__ = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __SCREAMING_SNAKE_CASE ( self ) -> None: self._resize(len(self._buckets ) * 2 ) def __SCREAMING_SNAKE_CASE ( self ) -> None: self._resize(len(self._buckets ) // 2 ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]: lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None: for ind in self._iterate_buckets(lowerCamelCase_ ): if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): break def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None: if self._is_full(): self._size_up() self._add_item(lowerCamelCase_ , lowerCamelCase_ ) def __delitem__( self , lowerCamelCase_ ) -> None: for ind in self._iterate_buckets(lowerCamelCase_ ): lowerCAmelCase__ = self._buckets[ind] if item is None: raise KeyError(lowerCamelCase_ ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , lowerCamelCase_ ) -> VAL: for ind in self._iterate_buckets(lowerCamelCase_ ): lowerCAmelCase__ = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCamelCase_ ) def __len__( self ) -> int: return self._len def __iter__( self ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: 
lowerCAmelCase__ = ''' ,'''.join( F"""{item.key}: {item.val}""" for item in self._buckets if item ) return F"""HashMap({val_string})"""
90
1
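The hash-map row above resolves collisions with linear probing: `_get_bucket_index` hashes the key into a slot and `_iterate_buckets` walks the table one slot at a time, wrapping around. Stripped of the class machinery, the probe sequence reduces to this generator (a sketch, not the row's code):

def probe_indices(key, table_size: int):
    # start at the hashed slot, then step through every slot once, wrapping around
    ind = hash(key) % table_size
    for _ in range(table_size):
        yield ind
        ind = (ind + 1) % table_size


print(list(probe_indices("some-key", 8)))  # visits all 8 slots exactly once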
'''simple docstring'''
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # check the row and the column
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # check both upper diagonals
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
90
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def _snake_case ( A , A , A ) -> Union[str, Any]: lowerCAmelCase__ = OmegaConf.load(A ) lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model'''] lowerCAmelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCAmelCase__ = {} lowerCAmelCase__ = '''first_stage_model.''' for key in keys: if key.startswith(A ): lowerCAmelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCAmelCase__ = {} lowerCAmelCase__ = '''model.diffusion_model.''' for key in keys: if key.startswith(A ): lowerCAmelCase__ = state_dict[key] lowerCAmelCase__ = config.model.params.first_stage_config.params lowerCAmelCase__ = config.model.params.unet_config.params lowerCAmelCase__ = VQModel(**A ).eval() vqvae.load_state_dict(A ) lowerCAmelCase__ = UNetLDMModel(**A ).eval() unet.load_state_dict(A ) lowerCAmelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , ) lowerCAmelCase__ = LDMPipeline(A , A , A ) pipeline.save_pretrained(A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', type=str, required=True) parser.add_argument('''--config_path''', type=str, required=True) parser.add_argument('''--output_path''', type=str, required=True) __UpperCAmelCase = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
90
1
'''simple docstring'''
from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
90
'''simple docstring''' import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __UpperCAmelCase = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class a__ ( a__ ): '''simple docstring''' lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase__ : bool = field( default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase__ : Optional[int] = field( default=a__ , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase__ : Optional[int] = field( default=a__ , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field( default=a__ , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: lowerCAmelCase__ = super().to_dict() for k, v in d.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = v.to_dict() return d
90
1
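The `to_dict` override in the Seq2Seq training-arguments row above exists because one field (the generation config) may itself be a config object, so serialization recursively calls that value's own `to_dict`. A toy version of the pattern (names are illustrative, and the duck-typed `hasattr` check is a variant of the row's isinstance test):

from __future__ import annotations

from dataclasses import dataclass, fields


@dataclass
class GenConfig:
    num_beams: int = 4

    def to_dict(self):
        return {"num_beams": self.num_beams}


@dataclass
class Args:
    lr: float = 1e-4
    generation_config: GenConfig | None = None

    def to_dict(self):
        d = {f.name: getattr(self, f.name) for f in fields(self)}
        # serialize nested config objects the same way the row above does
        return {k: v.to_dict() if hasattr(v, "to_dict") else v for k, v in d.items()}


print(Args(generation_config=GenConfig()).to_dict())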
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __UpperCAmelCase = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) __UpperCAmelCase = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} __UpperCAmelCase = '''zero2''' __UpperCAmelCase = '''zero3''' __UpperCAmelCase = [ZEROa, ZEROa] def _snake_case ( A , A , A ) -> List[str]: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param lowerCAmelCase__ = parameterized.to_safe_name('''_'''.join(str(A ) for x in param.args ) ) return F"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test __UpperCAmelCase = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class a__ ( a__ ): '''simple docstring''' @parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Any: self.run_and_check( stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) @require_torch_multi_gpu @parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> int: self.run_and_check( stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) @parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple: self.run_and_check( stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) @require_torch_multi_gpu @parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]: self.run_and_check( stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]: # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 10 , lowerCamelCase_ = True , lowerCamelCase_ = True , lowerCamelCase_ = True , ) -> Optional[int]: lowerCAmelCase__ = models[model] lowerCAmelCase__ = self.run_trainer( stage=lowerCamelCase_ , model_name=lowerCamelCase_ , eval_steps=lowerCamelCase_ , num_train_epochs=1 , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , ) self.do_checks(lowerCamelCase_ ) return output_dir def 
__SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 10 , lowerCamelCase_ = 1 , lowerCamelCase_ = True , lowerCamelCase_ = True , ) -> Optional[int]: lowerCAmelCase__ = self.get_auto_remove_tmp_dir('''./xxx''' , after=lowerCamelCase_ ) lowerCAmelCase__ = F""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowerCamelCase_ )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files lowerCAmelCase__ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() lowerCAmelCase__ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] lowerCAmelCase__ = self.get_launcher(lowerCamelCase_ ) lowerCAmelCase__ = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCamelCase_ , env=self.get_env() ) return output_dir def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Any: # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) lowerCAmelCase__ = min(2 , get_gpu_count() ) if distributed else 1 return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
90
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device __UpperCAmelCase = False class a__ ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase_ ) lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = generator.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = '''cyberpunk 2077''' lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 lowerCAmelCase__ = '''A painting of a squirrel eating a burger ''' lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.text_to_image( prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images 
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
90
1
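The DeepSpeed test row above passes a custom `name_func` to `parameterized.expand` so that both the ZeRO stage and the model key appear in each generated test name (by default only the first parameter shows up). The hook's shape is roughly this (a sketch; the `parameterized` package must be installed):

from parameterized import parameterized


def custom_name_func(func, param_num, param):
    # e.g. test_fp32_zero2_base instead of test_fp32_0
    suffix = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{suffix}"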
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[int] = KandinskyVaaInpaintPipeline lowercase__ : Any = ["image_embeds", "negative_image_embeds", "image", "mask_image"] lowercase__ : int = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] lowercase__ : List[str] = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] lowercase__ : int = False @property def __SCREAMING_SNAKE_CASE ( self ) -> Any: return 32 @property def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return 32 @property def __SCREAMING_SNAKE_CASE ( self ) -> int: return self.time_input_dim @property def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: return self.time_input_dim * 4 @property def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: return 1_00 @property def __SCREAMING_SNAKE_CASE ( self ) -> Any: torch.manual_seed(0 ) lowerCAmelCase__ = { '''in_channels''': 9, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowerCAmelCase__ = UNetaDConditionModel(**lowerCamelCase_ ) return model @property def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: torch.manual_seed(0 ) lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs ) return model def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.dummy_unet lowerCAmelCase__ = self.dummy_movq lowerCAmelCase__ = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowerCamelCase_ , ) lowerCAmelCase__ = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> Tuple: lowerCAmelCase__ = floats_tensor((1, 
self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ ) lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowerCamelCase_ ) # create init_image lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ ) lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('''RGB''' ).resize((2_56, 2_56) ) # create mask lowerCAmelCase__ = np.ones((64, 64) , dtype=np.floataa ) lowerCAmelCase__ = 0 if str(lowerCamelCase_ ).startswith('''mps''' ): lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ ) else: lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) lowerCAmelCase__ = { '''image''': init_image, '''mask_image''': mask, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 2, '''guidance_scale''': 4.0, '''output_type''': '''np''', } return inputs def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = '''cpu''' lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = self.pipeline_class(**lowerCamelCase_ ) lowerCAmelCase__ = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) lowerCAmelCase__ = output.images lowerCAmelCase__ = pipe( **self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0] lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ = np.array( [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __SCREAMING_SNAKE_CASE ( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' ) lowerCAmelCase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) lowerCAmelCase__ = np.ones((7_68, 7_68) , dtype=np.floataa ) lowerCAmelCase__ = 0 lowerCAmelCase__ = '''a hat''' lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase_ ) lowerCAmelCase__ = KandinskyVaaInpaintPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa ) lowerCAmelCase__ = pipeline.to(lowerCamelCase_ ) pipeline.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCAmelCase__ = 
torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior( lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() lowerCAmelCase__ = pipeline( image=lowerCamelCase_ , mask_image=lowerCamelCase_ , image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='''np''' , ) lowerCAmelCase__ = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
90
'''simple docstring'''
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
90
1
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNet2DConditionModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        return {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
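# Usage sketch: how the SAG pipeline exercised by the tests above is typically
# driven outside a test harness. A minimal example, assuming a CUDA device is
# available; `sag_scale` controls the strength of Self-Attention Guidance
# (0.0 disables it), and the prompt/output filename are placeholders.
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

image = pipe(
    "a photo of an astronaut riding a horse",  # placeholder prompt
    sag_scale=0.75,  # typical values fall in the 0.5-1.0 range
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]
image.save("sag_sample.png")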
90
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
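# Usage sketch: a minimal round-trip through the published checkpoint named in
# the script above, assuming the upload step was performed and the repo
# "stas/tiny-wmt19-en-ru" is reachable on the Hub. Useful as a CI smoke test
# where download size matters.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")

batch = tokenizer(["Making tiny model"], return_tensors="pt")
generated = model.generate(**batch, max_new_tokens=8)
# The decoded text is meaningless by design - the point is only that the
# tokenizer/model machinery runs end to end.
print(tokenizer.batch_decode(generated, skip_special_tokens=True))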
90
1
from typing import List, Union

import numpy as np

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline


logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        return {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
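# Usage sketch: the pipeline class above is normally reached through the
# `pipeline` factory rather than instantiated directly. A minimal example,
# assuming an NLI checkpoint such as facebook/bart-large-mnli; the sequence
# and labels are placeholders.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
    hypothesis_template="This example is {}.",  # the default template
    multi_label=False,  # labels compete via softmax over entailment logits
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label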
90
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
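# Usage sketch: how `find_executable_batch_size` is typically applied in real
# training code rather than in tests. The decorated function must take
# `batch_size` as its first argument and is called WITHOUT it; the decorator
# injects the starting value and halves it after every CUDA OOM until the body
# completes (raising once it reaches zero). `make_dataloader` and
# `training_step` are hypothetical helpers, not real APIs.
from accelerate.utils.memory import find_executable_batch_size


def train(model, dataset, starting_batch_size=128):
    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inner_training_loop(batch_size):
        loader = make_dataloader(dataset, batch_size)  # placeholder helper
        for batch in loader:
            training_step(model, batch)  # placeholder helper

    inner_training_loop()  # no args; batch_size is injected by the decorator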
90
1