Dataset schema (each row pairs a `code` snippet with a `style_context` snippet, assigns each a codestyle class id, and carries a binary `label`):

| Column | Type | Observed range |
| --- | --- | --- |
| `code` | string | 87-55.2k characters |
| `code_codestyle` | int64 | 0-349 |
| `style_context` | string | 135-49.1k characters |
| `style_context_codestyle` | int64 | 0-349 |
| `label` | int64 | 0-1 |
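A minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library; the repository id `user/code-style-pairs` is a placeholder rather than the real Hub name, and the comment on `label` reflects an assumption about its semantics, not documented behavior:

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the actual Hub dataset name.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # first 200 chars of the transformed source snippet
print(row["code_codestyle"])           # style-class id in [0, 349]
print(row["style_context_codestyle"])  # style-class id of the paired snippet
print(row["label"])                    # binary target (assumed: 1 if the two snippets share a style)
```

The sample rows below follow this schema.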
**Row 1**

`code`:

```python
'''simple docstring'''
from __future__ import annotations

import math


def lowerCamelCase__ ( _A , _A ):
    a : List[Any] = u
    for i in range(1 , _A ):
        a : Any = temp * (u - i)
    return temp


def lowerCamelCase__ ( ):
    a : Tuple = int(input('enter the numbers of values: ' ) )
    a : list[list[float]] = []
    for _ in range(_A ):
        y.append([] )

    for i in range(_A ):
        for j in range(_A ):
            y[i].append(_A )
            a : Optional[Any] = 0
    print('enter the values of parameters in a list: ' )
    a : Optional[Any] = list(map(_A , input().split() ) )

    print('enter the values of corresponding parameters: ' )
    for i in range(_A ):
        a : List[Any] = float(input() )

    a : Any = int(input('enter the value to interpolate: ' ) )
    a : Union[str, Any] = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1 , _A ):
        for j in range(n - i ):
            a : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1]

    a : Dict = y[0][0]
    for i in range(1 , _A ):
        summ += (ucal(_A , _A ) * y[0][i]) / math.factorial(_A )

    print(f"""the value at {value} is {summ}""" )


if __name__ == "__main__":
    main()
```
`code_codestyle`: 297
`style_context`:

```python
'''simple docstring'''


def lowerCamelCase__ ( _A ):
    return 10 - x * x


def lowerCamelCase__ ( _A , _A ):
    # Bolzano theory in order to find if there is a root between a and b
    if equation(_A ) * equation(_A ) >= 0:
        raise ValueError('Wrong space!' )

    a : Tuple = a
    while (b - a) >= 0.01:
        # Find middle point
        a : Tuple = (a + b) / 2
        # Check if middle point is root
        if equation(_A ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(_A ) * equation(_A ) < 0:
            a : List[str] = c
        else:
            a : Tuple = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
```
`style_context_codestyle`: 297

`label`: 1
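Given a row like the one above, a sequence-pair classifier could consume the `code`/`style_context` pair directly. The sketch below assumes that `label` is the pairwise target and uses `microsoft/codebert-base` only as one plausible encoder choice:

```python
from transformers import AutoTokenizer

# Any encoder that accepts sequence pairs works here; codebert-base is an assumption.
tok = AutoTokenizer.from_pretrained("microsoft/codebert-base")

enc = tok(
    row["code"],
    row["style_context"],
    truncation=True,
    max_length=512,
    return_tensors="pt",
)
# enc.input_ids now encodes both snippets as a single sequence pair;
# row["label"] (0 or 1) would serve as the classification target.
```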
**Row 2**

`code`:

```python
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import (
    BaseOutput,
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_onnx_available,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


@dataclass
class a__( lowerCamelCase__ ):
    lowercase__ = 42
    lowercase__ = 42


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepthaImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPixaPixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline

try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version('>=', '0.0.12')
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class a__( lowerCamelCase__ ):
        lowercase__ = 42
        lowercase__ = 42

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
```
`code_codestyle`: 297
`style_context`:

```python
'''simple docstring'''
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class a__:
    def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ):
        a : Tuple = parent
        a : List[str] = batch_size
        a : Optional[Any] = seq_length
        a : Tuple = is_training
        a : Optional[Any] = use_input_mask
        a : List[Any] = use_token_type_ids
        a : List[Any] = use_labels
        a : int = vocab_size
        a : Union[str, Any] = hidden_size
        a : Any = num_hidden_layers
        a : List[str] = num_attention_heads
        a : int = intermediate_size
        a : str = hidden_act
        a : Tuple = hidden_dropout_prob
        a : Union[str, Any] = attention_probs_dropout_prob
        a : List[str] = max_position_embeddings
        a : Any = type_vocab_size
        a : List[str] = type_sequence_label_size
        a : Union[str, Any] = initializer_range
        a : Optional[int] = num_labels
        a : Optional[Any] = num_choices
        a : Optional[int] = scope

    def lowercase_ ( self : List[Any] ):
        a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        a : Dict = None
        if self.use_input_mask:
            a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )

        a : Optional[Any] = None
        a : Optional[int] = None
        a : Dict = None
        if self.use_labels:
            a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a : List[str] = ids_tensor([self.batch_size] , self.num_choices )

        a : Dict = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowercase_ ( self : List[Any] ):
        a : Any = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
        return config

    def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ):
        a : Tuple = EsmForProteinFolding(config=__snake_case ).float()
        model.to(__snake_case )
        model.eval()
        a : Dict = model(__snake_case , attention_mask=__snake_case )
        a : Union[str, Any] = model(__snake_case )
        a : List[Any] = model(__snake_case )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )

    def lowercase_ ( self : Optional[Any] ):
        a : Tuple = self.prepare_config_and_inputs()
        ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) : Optional[Any] = config_and_inputs
        a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    lowercase__ = False
    lowercase__ = (EsmForProteinFolding,) if is_torch_available() else ()
    lowercase__ = ()
    lowercase__ = {} if is_torch_available() else {}
    lowercase__ = False

    def lowercase_ ( self : int ):
        a : Tuple = EsmFoldModelTester(self )
        a : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 )

    def lowercase_ ( self : List[str] ):
        self.config_tester.run_common_tests()

    def lowercase_ ( self : Union[str, Any] ):
        a : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__snake_case )

    @unittest.skip('Does not support attention outputs' )
    def lowercase_ ( self : str ):
        pass

    @unittest.skip
    def lowercase_ ( self : Optional[int] ):
        pass

    @unittest.skip('Esm does not support embedding resizing' )
    def lowercase_ ( self : Optional[int] ):
        pass

    @unittest.skip('Esm does not support embedding resizing' )
    def lowercase_ ( self : Any ):
        pass

    @unittest.skip('ESMFold does not support passing input embeds!' )
    def lowercase_ ( self : Any ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_ ( self : Union[str, Any] ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_ ( self : List[Any] ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_ ( self : List[Any] ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_ ( self : int ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_ ( self : List[Any] ):
        pass

    @unittest.skip('ESMFold does not output hidden states in the normal way.' )
    def lowercase_ ( self : int ):
        pass

    @unittest.skip('ESMfold does not output hidden states in the normal way.' )
    def lowercase_ ( self : int ):
        pass

    @unittest.skip('ESMFold only has one output format.' )
    def lowercase_ ( self : Dict ):
        pass

    @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
    def lowercase_ ( self : Tuple ):
        pass

    @unittest.skip('ESMFold does not support input chunking.' )
    def lowercase_ ( self : List[str] ):
        pass

    @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
    def lowercase_ ( self : List[Any] ):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
    def lowercase_ ( self : Union[str, Any] ):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
    def lowercase_ ( self : Any ):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
    def lowercase_ ( self : List[str] ):
        pass

    @unittest.skip('ESMFold doesn\'t support data parallel.' )
    def lowercase_ ( self : Dict ):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowercase_ ( self : Union[str, Any] ):
        pass


@require_torch
class a__( lowerCamelCase__ ):
    @slow
    def lowercase_ ( self : Optional[int] ):
        a : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
        model.eval()
        a : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        a : Any = model(__snake_case )['positions']
        a : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) )
```
`style_context_codestyle`: 297

`label`: 1
**Row 3**

`code`:

```python
'''simple docstring'''
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class a__:
    lowercase__ = 42
    lowercase__ = None
    lowercase__ = None


lowerCAmelCase: Optional[Any] = namedtuple('CoinsDistribResult', 'moves excess')


def lowerCamelCase__ ( _A ):
    if root is None:
        return 0

    # Validation
    def count_nodes(_A ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(_A ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(_A ) != count_coins(_A ):
        raise ValueError('The nodes number should be same as the number of coins' )

    # Main calculation
    def get_distrib(_A ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        a , a : str = get_distrib(node.left )
        a , a : int = get_distrib(node.right )

        a : Dict = 1 - left_distrib_excess
        a : List[Any] = 1 - right_distrib_excess

        a : int = ( left_distrib_moves + right_distrib_moves + abs(_A ) + abs(_A ) )
        a : str = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(_A , _A )

    return get_distrib(_A )[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
`code_codestyle`: 297
`style_context`:

```python
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


class a__( nn.Module ):
    def __init__( self : Any , __snake_case : int = 16 , __snake_case : int = 88 , __snake_case : Optional[int] = None , __snake_case : int = 1 , __snake_case : float = 0.0 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : Optional[int] = None , ):
        super().__init__()
        a : Optional[int] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__snake_case , attention_head_dim=__snake_case , in_channels=__snake_case , num_layers=__snake_case , dropout=__snake_case , norm_num_groups=__snake_case , cross_attention_dim=__snake_case , attention_bias=__snake_case , sample_size=__snake_case , num_vector_embeds=__snake_case , activation_fn=__snake_case , num_embeds_ada_norm=__snake_case , ) for _ in range(2 ) ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        a : Union[str, Any] = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        a : Tuple = [77, 2_57]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        a : Any = [1, 0]

    def lowercase_ ( self : str , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Dict=None , __snake_case : bool = True , ):
        a : Dict = hidden_states
        a : Tuple = []
        a : Optional[int] = 0

        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            a : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            a : Tuple = self.transformer_index_for_condition[i]
            a : Union[str, Any] = self.transformers[transformer_index]( __snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case , cross_attention_kwargs=__snake_case , return_dict=__snake_case , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]

        a : Optional[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        a : int = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=__snake_case )
```
`style_context_codestyle`: 297

`label`: 1
**Row 4**

`code`:

```python
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    lowerCAmelCase: Union[str, Any] = None

lowerCAmelCase: Optional[int] = logging.get_logger(__name__)
lowerCAmelCase: Tuple = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

lowerCAmelCase: Union[str, Any] = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}

lowerCAmelCase: List[Any] = {
    'google/fnet-base': 5_1_2,
    'google/fnet-large': 5_1_2,
}

lowerCAmelCase: int = '▁'


class a__( lowerCamelCase__ ):
    lowercase__ = VOCAB_FILES_NAMES
    lowercase__ = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase__ = ["""input_ids""", """token_type_ids"""]
    lowercase__ = FNetTokenizer

    def __init__( self : Optional[int] , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : Dict=False , __snake_case : Optional[Any]=True , __snake_case : Union[str, Any]=True , __snake_case : int="<unk>" , __snake_case : Dict="[SEP]" , __snake_case : List[Any]="<pad>" , __snake_case : List[str]="[CLS]" , __snake_case : Tuple="[MASK]" , **__snake_case : Optional[int] , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        a : Optional[int] = ( AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case , normalized=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token )

        super().__init__( __snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )

        a : Optional[Any] = do_lower_case
        a : List[Any] = remove_space
        a : Dict = keep_accents
        a : List[Any] = vocab_file
        a : Optional[int] = False if not self.vocab_file else True

    def lowercase_ ( self : Any , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
        a : Dict = [self.sep_token_id]
        a : Any = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def lowercase_ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
        a : List[str] = [self.sep_token_id]
        a : List[Any] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
        if not os.path.isdir(__snake_case ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        a : Optional[Any] = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
            copyfile(self.vocab_file , __snake_case )

        return (out_vocab_file,)
```
`code_codestyle`: 297
`style_context`:

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


lowerCAmelCase: Union[str, Any] = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase: List[Any] = ['SpeechT5Tokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase: Any = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    from .configuration_speechta import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechTaConfig,
        SpeechTaHifiGanConfig,
    )
    from .feature_extraction_speechta import SpeechTaFeatureExtractor
    from .processing_speechta import SpeechTaProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speechta import SpeechTaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speechta import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechTaForSpeechToSpeech,
            SpeechTaForSpeechToText,
            SpeechTaForTextToSpeech,
            SpeechTaHifiGan,
            SpeechTaModel,
            SpeechTaPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase: Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
```
`style_context_codestyle`: 297

`label`: 1
**Row 5**

`code`:

```python
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class a__( lowerCamelCase__ ):
    lowercase__ = 42
    lowercase__ = 42

    def __init__( self : Optional[int] , __snake_case : UNetaDModel , __snake_case : ScoreSdeVeScheduler ):
        super().__init__()
        self.register_modules(unet=__snake_case , scheduler=__snake_case )

    @torch.no_grad()
    def __call__( self : Union[str, Any] , __snake_case : int = 1 , __snake_case : int = 20_00 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , **__snake_case : Any , ):
        a : Optional[Any] = self.unet.config.sample_size
        a : Union[str, Any] = (batch_size, 3, img_size, img_size)

        a : Dict = self.unet

        a : List[Any] = randn_tensor(__snake_case , generator=__snake_case ) * self.scheduler.init_noise_sigma
        a : Tuple = sample.to(self.device )

        self.scheduler.set_timesteps(__snake_case )
        self.scheduler.set_sigmas(__snake_case )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            a : int = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                a : List[Any] = self.unet(__snake_case , __snake_case ).sample
                a : str = self.scheduler.step_correct(__snake_case , __snake_case , generator=__snake_case ).prev_sample

            # prediction step
            a : Union[str, Any] = model(__snake_case , __snake_case ).sample
            a : List[str] = self.scheduler.step_pred(__snake_case , __snake_case , __snake_case , generator=__snake_case )

            a , a : List[Any] = output.prev_sample, output.prev_sample_mean

        a : Union[str, Any] = sample_mean.clamp(0 , 1 )
        a : Optional[int] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            a : Optional[Any] = self.numpy_to_pil(__snake_case )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=__snake_case )
```
`code_codestyle`: 297
`style_context`:

```python
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase: str = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase: Optional[Any] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    lowerCAmelCase: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
```
`style_context_codestyle`: 297

`label`: 1
**Row 6**

`code`:

```python
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


lowerCAmelCase: Tuple = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase: Dict = [
        'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MraForMaskedLM',
        'MraForMultipleChoice',
        'MraForQuestionAnswering',
        'MraForSequenceClassification',
        'MraForTokenClassification',
        'MraLayer',
        'MraModel',
        'MraPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    lowerCAmelCase: Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
```
`code_codestyle`: 297
`style_context`:

```python
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


lowerCAmelCase: Dict = logging.get_logger(__name__)

lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

lowerCAmelCase: List[Any] = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

lowerCAmelCase: str = {
    'allenai/led-base-16384': 1_6_3_8_4,
}


class a__( lowerCamelCase__ ):
    lowercase__ = VOCAB_FILES_NAMES
    lowercase__ = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase__ = LEDTokenizer
    lowercase__ = ["""input_ids""", """attention_mask"""]

    def __init__( self : List[Any] , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Dict="replace" , __snake_case : int="<s>" , __snake_case : Any="</s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : int="<mask>" , __snake_case : int=False , __snake_case : str=True , **__snake_case : Tuple , ):
        super().__init__( __snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , )

        a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space:
            a : List[Any] = getattr(__snake_case , pre_tok_state.pop('type' ) )
            a : Optional[Any] = add_prefix_space
            a : Optional[Any] = pre_tok_class(**__snake_case )

        a : Optional[int] = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        a : Dict = 'post_processor'
        a : int = getattr(self.backend_tokenizer , __snake_case , __snake_case )
        if tokenizer_component_instance:
            a : Tuple = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                a : Any = tuple(state['sep'] )
            if "cls" in state:
                a : Any = tuple(state['cls'] )

            a : Optional[Any] = False

            if state.get('add_prefix_space' , __snake_case ) != add_prefix_space:
                a : Any = add_prefix_space
                a : Optional[Any] = True

            if state.get('trim_offsets' , __snake_case ) != trim_offsets:
                a : List[Any] = trim_offsets
                a : Union[str, Any] = True

            if changes_to_apply:
                a : int = getattr(__snake_case , state.pop('type' ) )
                a : List[Any] = component_class(**__snake_case )
                setattr(self.backend_tokenizer , __snake_case , __snake_case )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def lowercase_ ( self : Dict ):
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def lowercase_ ( self : Dict , __snake_case : List[str] ):
        a : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value
        a : Optional[int] = value

    def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ):
        a : Dict = kwargs.get('is_split_into_words' , __snake_case )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' )

        return super()._batch_encode_plus(*__snake_case , **__snake_case )

    def lowercase_ ( self : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : List[str] ):
        a : Optional[int] = kwargs.get('is_split_into_words' , __snake_case )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' )

        return super()._encode_plus(*__snake_case , **__snake_case )

    def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
        a : Union[str, Any] = self._tokenizer.model.save(__snake_case , name=__snake_case )
        return tuple(__snake_case )

    def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : int=None ):
        a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
        a : int = [self.sep_token_id]
        a : Optional[Any] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
        a : Optional[Any] = super()._pad( encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )

        # Load from model defaults
        if return_attention_mask is None:
            a : str = 'attention_mask' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            a : Any = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            a : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case )

            if needs_to_be_padded:
                a : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    a : Dict = ( encoded_inputs['global_attention_mask'] + [-1] * difference )
                elif self.padding_side == "left":
                    a : Union[str, Any] = [-1] * difference + encoded_inputs[ 'global_attention_mask' ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )

        return encoded_inputs
```
`style_context_codestyle`: 297

`label`: 1
**Row 7**

`code`:

```python
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCAmelCase: int = [
    'cross_validation.py',
    'gradient_accumulation.py',
    'local_sgd.py',
    'multi_process_metrics.py',
    'memory.py',
    'automatic_gradient_accumulation.py',
    'fsdp_with_peak_mem_tracking.py',
    'deepspeed_with_config_support.py',
    'megatron_lm_gpt_pretraining.py',
]


class a__( unittest.TestCase ):
    def lowercase_ ( self : int , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
        a : Optional[int] = None
        a : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
        a : List[str] = os.path.abspath('examples' )
        for item in os.listdir(__snake_case ):
            if item not in EXCLUDE_EXAMPLES:
                a : int = os.path.join(__snake_case , __snake_case )
                if os.path.isfile(__snake_case ) and ".py" in item_path:
                    with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='main()' if parser_only else 'training_function()' , ):
                        a : List[Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
                        a : Union[str, Any] = '\n'.join(__snake_case )
                        if special_strings is not None:
                            for string in special_strings:
                                a : Union[str, Any] = diff.replace(__snake_case , '' )
                        self.assertEqual(__snake_case , '' )

    def lowercase_ ( self : Optional[Any] ):
        self.one_complete_example('complete_nlp_example.py' , __snake_case )
        self.one_complete_example('complete_nlp_example.py' , __snake_case )

    def lowercase_ ( self : Any ):
        a : Dict = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
        a : int = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case )
        self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case )


@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class a__( lowerCamelCase__ ):
    lowercase__ = False

    @classmethod
    def lowercase_ ( cls : Optional[int] ):
        super().setUpClass()
        a : List[str] = tempfile.mkdtemp()
        a : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        a : Optional[int] = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def lowercase_ ( cls : Optional[int] ):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def lowercase_ ( self : Tuple ):
        a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )

    def lowercase_ ( self : Dict ):
        a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split()
        a : int = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )

    def lowercase_ ( self : Any ):
        a : Tuple = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} """.split()
        a : int = run_command(self._launch_args + testargs , return_stdout=__snake_case )
        self.assertNotIn('epoch 0:' , __snake_case )
        self.assertIn('epoch 1:' , __snake_case )

    def lowercase_ ( self : int ):
        a : Optional[int] = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} """.split()
        a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
        if torch.cuda.is_available():
            a : Any = torch.cuda.device_count()
        else:
            a : str = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:' , __snake_case )
            self.assertIn('epoch 1:' , __snake_case )
        else:
            self.assertIn('epoch 0:' , __snake_case )
            self.assertIn('epoch 1:' , __snake_case )

    @slow
    def lowercase_ ( self : Tuple ):
        a : Tuple = '\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        '.split()
        with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
            a : Any = run_command(self._launch_args + testargs , return_stdout=__snake_case )
            a : Optional[Any] = re.findall('({.+})' , __snake_case )
            a : str = [r for r in results if 'accuracy' in r][-1]
            a : str = ast.literal_eval(__snake_case )
            self.assertGreaterEqual(results['accuracy'] , 0.75 )

    def lowercase_ ( self : Optional[int] ):
        a : int = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def lowercase_ ( self : Optional[int] ):
        with tempfile.TemporaryDirectory() as tmpdir:
            a : Optional[Any] = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(__snake_case , 'tracking' ) ) )

    def lowercase_ ( self : List[str] ):
        a : Optional[Any] = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs )

    def lowercase_ ( self : int ):
        a : Optional[Any] = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs )
```
`code_codestyle`: 297
`style_context`:

```python
'''simple docstring'''
import copy
import os

import cva
import numpy as np
from matplotlib import pyplot as plt


class a__:
    def __init__( self : Tuple ):
        a : Optional[int] = ''
        a : Optional[Any] = ''
        a : str = []
        a : int = 0
        a : str = 2_56
        a : Union[str, Any] = 0
        a : Any = 0
        a : Optional[int] = 0
        a : List[str] = 0

    def lowercase_ ( self : str , __snake_case : str ):
        a : Any = cva.imread(__snake_case , 0 )
        a : Optional[Any] = copy.deepcopy(self.img )
        a , a , a : int = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
        a : Optional[int] = np.sum(__snake_case )
        for i in range(len(__snake_case ) ):
            a : Optional[Any] = x[i] / self.k
            self.sk += prk
            a : str = (self.L - 1) * self.sk
            if self.rem != 0:
                a : Optional[int] = int(last % last )
            a : int = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(__snake_case )
            a : str = int(np.ma.count(self.img ) / self.img[1].size )
            a : Optional[int] = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                a : Any = self.img[j][i]
                if num != self.last_list[num]:
                    a : str = self.last_list[num]
        cva.imwrite('output_data/output.jpg' , self.img )

    def lowercase_ ( self : Dict ):
        plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )

    def lowercase_ ( self : List[Any] ):
        cva.imshow('Output-Image' , self.img )
        cva.imshow('Input-Image' , self.original_image )
        cva.waitKey(50_00 )
        cva.destroyAllWindows()


if __name__ == "__main__":
    lowerCAmelCase: Optional[Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    lowerCAmelCase: Tuple = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
```
`style_context_codestyle`: 297

`label`: 1
**Row 8**

`code`:

```python
'''simple docstring'''


def lowerCamelCase__ ( _A ):
    a : int = 1
    a : List[str] = 2
    while i * i <= n:
        a : Any = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def lowerCamelCase__ ( ):
    a : Union[str, Any] = 1
    a : Any = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(_A ) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
```
`code_codestyle`: 297
`style_context`:

```python
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class a__:
    def __init__( self : List[Any] , __snake_case : Union[str, Any] ):
        if isinstance(__snake_case , __snake_case ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            a : str = deepcopy(__snake_case )
        elif os.path.exists(__snake_case ):
            with io.open(__snake_case , 'r' , encoding='utf-8' ) as f:
                a : Optional[Any] = json.load(__snake_case )
        else:
            try:
                a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' )
                a : Union[str, Any] = json.loads(__snake_case )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError( F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )

        a : List[str] = config
        self.set_stage_and_offload()

    def lowercase_ ( self : List[str] ):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        a : Dict = self.get_value('zero_optimization.stage' , -1 )

        # offload
        a : str = False
        if self.is_zeroa() or self.is_zeroa():
            a : Union[str, Any] = set(['cpu', 'nvme'] )
            a : Optional[Any] = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                a : List[str] = True

    def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
        a : str = self.config

        # find the config node of interest if it exists
        a : List[str] = ds_key_long.split('.' )
        a : Dict = nodes.pop()
        for node in nodes:
            a : List[Any] = config.get(__snake_case )
            if config is None:
                return None, ds_key

        return config, ds_key

    def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ):
        a , a : List[Any] = self.find_config_node(__snake_case )
        if config is None:
            return default
        return config.get(__snake_case , __snake_case )

    def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ):
        a : Optional[Any] = self.config

        # find the config node of interest if it exists
        a : List[str] = ds_key_long.split('.' )
        for node in nodes:
            a : str = config
            a : Dict = config.get(__snake_case )
            if config is None:
                if must_exist:
                    raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(__snake_case )

    def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ):
        a : Union[str, Any] = self.get_value(__snake_case )
        return False if value is None else bool(__snake_case )

    def lowercase_ ( self : Union[str, Any] , __snake_case : str ):
        a : Optional[Any] = self.get_value(__snake_case )
        return False if value is None else not bool(__snake_case )

    def lowercase_ ( self : Optional[Any] ):
        return self._stage == 2

    def lowercase_ ( self : Union[str, Any] ):
        return self._stage == 3

    def lowercase_ ( self : str ):
        return self._offload


class a__:
    def __init__( self : Tuple , __snake_case : str ):
        a : Optional[Any] = engine

    def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ):
        # runs backpropagation and handles mixed precision
        self.engine.backward(__snake_case , **__snake_case )

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class a__( lowerCamelCase__ ):
    def __init__( self : str , __snake_case : List[str] ):
        super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case )
        a : Optional[Any] = hasattr(self.optimizer , 'overflow' )

    def lowercase_ ( self : Dict , __snake_case : Dict=None ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def lowercase_ ( self : Optional[Any] ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def lowercase_ ( self : Tuple ):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class a__( lowerCamelCase__ ):
    def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
        super().__init__(__snake_case , __snake_case )

    def lowercase_ ( self : Any ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class a__:
    def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ):
        a : Optional[Any] = params
        a : str = lr
        a : List[str] = weight_decay
        a : str = kwargs


class a__:
    def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ):
        a : Union[str, Any] = optimizer
        a : Any = total_num_steps
        a : List[str] = warmup_num_steps
        a : int = kwargs
```
`style_context_codestyle`: 297

`label`: 1
**Row 9**

`code`:

```python
'''simple docstring'''
import argparse
from collections import defaultdict

import yaml


lowerCAmelCase: Any = 'docs/source/en/_toctree.yml'


def lowerCamelCase__ ( _A ):
    a : Optional[Any] = defaultdict(_A )
    a : List[Any] = []
    a : List[Any] = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(_A )

    a : Any = new_doc_list

    a : List[str] = [key for key, value in counts.items() if value > 1]

    a : List[str] = []
    for duplicate_key in duplicates:
        a : int = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(_A ) > 1:
            raise ValueError( f"""{duplicate_key} is present several times in the documentation table of content at """ '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )

    a : str = sorted(_A , key=lambda _A : s["title"].lower() )

    # "overview" gets special treatment and is always first
    if len(_A ) > 1:
        raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )

    overview_doc.extend(_A )

    # Sort
    return overview_doc


def lowerCamelCase__ ( _A=False ):
    with open(_A , encoding='utf-8' ) as f:
        a : int = yaml.safe_load(f.read() )

    # Get to the API doc
    a : Tuple = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    a : Any = content[api_idx]['sections']

    # Then to the model doc
    a : List[str] = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    a : List[str] = api_doc[scheduler_idx]['sections']
    a : Union[str, Any] = clean_doc_toc(_A )

    a : Any = False
    if new_scheduler_doc != scheduler_doc:
        a : Any = True
        if overwrite:
            a : Tuple = new_scheduler_doc

    if diff:
        if overwrite:
            a : Dict = api_doc
            with open(_A , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(_A , allow_unicode=_A ) )
        else:
            raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )


def lowerCamelCase__ ( _A=False ):
    with open(_A , encoding='utf-8' ) as f:
        a : str = yaml.safe_load(f.read() )

    # Get to the API doc
    a : str = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    a : Dict = content[api_idx]['sections']

    # Then to the model doc
    a : List[str] = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    a : int = False
    a : Optional[int] = api_doc[pipeline_idx]['sections']
    a : Union[str, Any] = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            a : Any = pipeline_doc['section']
            a : Optional[Any] = clean_doc_toc(_A )
            if overwrite:
                a : int = new_sub_pipeline_doc
        new_pipeline_docs.append(_A )

    # sort overall pipeline doc
    a : Dict = clean_doc_toc(_A )

    if new_pipeline_docs != pipeline_docs:
        a : Optional[Any] = True
        if overwrite:
            a : Optional[int] = new_pipeline_docs

    if diff:
        if overwrite:
            a : Dict = api_doc
            with open(_A , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(_A , allow_unicode=_A ) )
        else:
            raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )


if __name__ == "__main__":
    lowerCAmelCase: Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    lowerCAmelCase: Union[str, Any] = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
```
`code_codestyle`: 297
`style_context`: identical, byte for byte, to the `code` field of Row 7 above (the `accelerate` example-scripts test module), so it is not repeated here.
'''simple docstring'''
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    # The standard header name is 'User-Agent'; a random value helps avoid bot blocking.
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
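The script takes the query from the command line; note that '.eZt8xd' is a Google results CSS class and is liable to change, so the selector may need updating. A usage sketch (the file name is illustrative):

python crawl_google_results.py "hello world"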
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class a__( lowerCamelCase__ ): def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ): a : Union[str, Any] = tokenizer a : Union[str, Any] = dataset a : Any = len(__snake_case ) if n_tasks is None else n_tasks a : List[str] = n_copies def __iter__( self : str ): a : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() ) a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class a__( lowerCamelCase__ ): def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ): a : Dict = start_length a : Dict = eof_strings a : str = tokenizer def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ): a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__snake_case ) def lowerCamelCase__ ( _A ): a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ): a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_A ) ): with torch.no_grad(): a : Optional[Any] = batch['ids'].shape[-1] a : Optional[Any] = accelerator.unwrap_model(_A ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A ) # each task is generated batch_size times a : Tuple = batch['task_id'].repeat(_A ) a : List[Any] = accelerator.pad_across_processes( _A , dim=1 , pad_index=tokenizer.pad_token_id ) a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) a : List[str] = generated_tokens.cpu().numpy() a : int = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_A , _A ): gen_token_dict[task].append(_A ) a : Any = [[] for _ in range(_A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) code_gens[task].append(remove_last_block(_A ) ) return code_gens def lowerCamelCase__ ( ): # Setup configuration a : Dict = HfArgumentParser(_A ) a : Any = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a : List[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice 
with multiprocessing a : int = 'false' if args.num_workers is None: a : Dict = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a : List[Any] = Accelerator() set_seed(args.seed , device_specific=_A ) # Load model and tokenizer a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) a : str = tokenizer.eos_token a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a : Optional[Any] = { 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ), } # Load evaluation dataset and metric a : Optional[int] = load_dataset('openai_humaneval' ) a : Optional[Any] = load_metric('code_eval' ) a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) a : Optional[Any] = args.n_samples // args.batch_size a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A ) # do not confuse args.batch_size, which is actually the num_return_sequences a : int = DataLoader(_A , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a : int = code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`' ' flag to enable code evaluation.' ) raise exception a , a : int = accelerator.prepare(_A , _A ) a : int = complete_code( _A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , ) if accelerator.is_main_process: a : List[str] = [] for task in tqdm(range(_A ) ): a : int = human_eval['test'][task]['test'] a : int = f"""check({human_eval["test"][task]["entry_point"]})""" references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric a , a : Tuple = code_eval_metric.compute( references=_A , predictions=_A , num_workers=args.num_workers ) print(f"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(_A , _A ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
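A worked instance of the truncation step above, referenced as remove_last_block where complete_code calls it: re.split with a capturing group keeps the matched separator, so slicing off the last two entries drops the separator plus whatever spilled past it (in typical generations that trailing piece is the empty string the comment mentions).

completion = "    return a + b\n\ndef next_function():\n    pass"
# splitting on EOF_STRINGS gives ['    return a + b\n', '\ndef', ' next_function():\n    pass']
assert remove_last_block(completion) == "    return a + b\n"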
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def lowerCamelCase__ ( _A , _A , _A ): if isinstance(_A , torch.Tensor ): return image elif isinstance(_A , PIL.Image.Image ): a : Any = [image] if isinstance(image[0] , PIL.Image.Image ): a : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] a : int = np.concatenate(_A , axis=0 ) a : int = np.array(_A ).astype(np.floataa ) / 255.0 a : str = image.transpose(0 , 3 , 1 , 2 ) a : str = 2.0 * image - 1.0 a : Optional[int] = torch.from_numpy(_A ) elif isinstance(image[0] , torch.Tensor ): a : Optional[Any] = torch.cat(_A , dim=0 ) return image def lowerCamelCase__ ( _A , _A , _A , _A=0.9995 ): if not isinstance(_A , np.ndarray ): a : Dict = True a : Optional[Any] = va.device a : Optional[int] = va.cpu().numpy() a : Union[str, Any] = va.cpu().numpy() a : Any = np.sum(va * va / (np.linalg.norm(_A ) * np.linalg.norm(_A )) ) if np.abs(_A ) > DOT_THRESHOLD: a : Any = (1 - t) * va + t * va else: a : Any = np.arccos(_A ) a : Tuple = np.sin(_A ) a : Optional[Any] = theta_a * t a : List[Any] = np.sin(_A ) a : Dict = np.sin(theta_a - theta_t ) / sin_theta_a a : int = sin_theta_t / sin_theta_a a : Any = sa * va + sa * va if inputs_are_torch: a : Dict = torch.from_numpy(_A ).to(_A ) return va def lowerCamelCase__ ( _A , _A ): a : Optional[int] = F.normalize(_A , dim=-1 ) a : str = F.normalize(_A , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def lowerCamelCase__ ( _A , _A ): for param in model.parameters(): a : int = value class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ): super().__init__() self.register_modules( vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , ) a : Optional[Any] = ( feature_extractor.size if isinstance(feature_extractor.size , __snake_case ) else feature_extractor.size['shortest_edge'] ) a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , __snake_case ) set_requires_grad(self.clip_model , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__snake_case ) def 
lowercase_ ( self : Union[str, Any] ): self.enable_attention_slicing(__snake_case ) def lowercase_ ( self : Optional[Any] ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : Tuple ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : int ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : Union[str, Any] ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ): # get the original timestep using init_timestep a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case ) a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 ) a : List[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ): if not isinstance(__snake_case , torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" ) a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case ) if isinstance(__snake_case , __snake_case ): a : Optional[int] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case ) ] a : Optional[Any] = torch.cat(__snake_case , dim=0 ) else: a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : List[str] = 0.18215 * init_latents a : str = init_latents.repeat_interleave(__snake_case , dim=0 ) a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case ) # get latents a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case ) a : int = init_latents return latents def lowercase_ ( self : List[str] , __snake_case : Dict ): a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ): a : List[Any] = self.feature_extractor.preprocess(__snake_case ) a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() a : int = self.clip_model.get_image_features(__snake_case ) a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 ) return image_embeddings_clip @torch.enable_grad() def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ): a : Optional[Any] = latents.detach().requires_grad_() a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, 
DPMSolverMultistepScheduler) ): a : int = self.scheduler.alphas_cumprod[timestep] a : Any = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 a : Tuple = torch.sqrt(__snake_case ) a : str = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , __snake_case ): a : List[Any] = self.scheduler.sigmas[index] a : Optional[int] = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Union[str, Any] = 1 / 0.18215 * sample a : str = self.vae.decode(__snake_case ).sample a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case ) a : List[str] = self.normalize(__snake_case ).to(latents.dtype ) a : List[str] = self.clip_model.get_image_features(__snake_case ) a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0] if isinstance(self.scheduler , __snake_case ): a : List[Any] = latents.detach() + grads * (sigma**2) a : Optional[int] = noise_pred_original else: a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ): if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(__snake_case , torch.Generator ) and batch_size > 1: a : Dict = [generator] + [None] * (batch_size - 1) a : Any = [ ('model', self.coca_model is None), ('tokenizer', self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] a : List[str] = [x[0] for x in coca_is_none if x[1]] a : List[str] = ', '.join(__snake_case ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(__snake_case ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) a : int = self.get_image_description(__snake_case ) if style_prompt is None: if len(__snake_case ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) 
a : Union[str, Any] = self.get_image_description(__snake_case ) # get prompt text embeddings for content and style a : Optional[Any] = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] a : Dict = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] a : Any = slerp(__snake_case , __snake_case , __snake_case ) # duplicate text embeddings for each generation per prompt a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 ) # set timesteps a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) a : Any = {} if accepts_offset: a : Optional[Any] = 1 self.scheduler.set_timesteps(__snake_case , **__snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device ) a : Optional[int] = timesteps[:1].repeat(__snake_case ) # Preprocess image a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case ) a : List[Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : str = preprocess(__snake_case , __snake_case , __snake_case ) a : Union[str, Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case ) if clip_guidance_scale > 0: a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : int = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : List[str] = slerp( __snake_case , __snake_case , __snake_case ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. a : int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: a : Any = content_text_input.input_ids.shape[-1] a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' ) a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a : Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) a : List[str] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to( self.device ) else: a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) a : List[str] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a : Union[str, Any] = {} if accepts_eta: a : List[str] = eta # check if the scheduler accepts generator a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: a : Any = generator with self.progress_bar(total=__snake_case ): for i, t in enumerate(__snake_case ): # expand the latents if we are doing classifier free guidance a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample # perform classifier free guidance if do_classifier_free_guidance: a , a : List[str] = noise_pred.chunk(2 ) a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: a : Optional[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) a , a : Union[str, Any] = self.cond_fn( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # compute the previous noisy sample x_t -> x_t-1 a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Tuple = 1 / 0.18215 * latents a : Optional[int] = self.vae.decode(__snake_case ).sample a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a : str = self.numpy_to_pil(__snake_case ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
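As the inline comment notes, guidance_scale plays the role of the Imagen guidance weight w. A tiny numeric sketch of the classifier-free-guidance combination used above (illustrative tensors, not from the pipeline):

import torch

noise_pred_uncond = torch.tensor([0.2])
noise_pred_text = torch.tensor([0.5])
guided = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
# guidance_scale = 1 recovers the text-conditional prediction, i.e. no extra guidance:
assert torch.allclose(
    noise_pred_uncond + 1.0 * (noise_pred_text - noise_pred_uncond),
    noise_pred_text,
)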
'''simple docstring'''
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple('covid_data', 'cases deaths recovered')


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
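The xpath returns the three "maincounter-number" texts in page order, so they map positionally onto the namedtuple fields. A sketch with made-up figures:

sample = covid_data('700,000,000', '7,000,000', '670,000,000')
assert sample.cases == '700,000,000' and sample.deaths == '7,000,000'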
'''simple docstring'''


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
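A quick numeric check of the Friedmann form above (a sketch, not part of the module): at redshift zero the bracket reduces to the sum of the density parameters, which is one for a flat universe, so the function returns the Hubble constant itself.

from math import isclose

assert isclose(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=1 - 0.3 - 1e-4,
        redshift=0,
    ),
    68.3,
)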
'''simple docstring'''
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in each row of the
    # current board (possible_board)
    if row == n:
        # We convert possible_board, which looks like [1, 3, 0, 2], into
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible placements
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value, because that would mean a vertical collision.
        # Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b, i.e. row - col = b
        # 135º: y + x = b, i.e. row + col = b
        #
        # and verify that their results do not already exist in
        # diagonal_right_collisions and diagonal_left_collisions respectively.
        # If any of these checks is true there is a collision, so we continue to
        # the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise we recurse with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
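A worked instance of the board rendering described in the comments above (the column list gives the queen's column for each row):

n = 4
possible_board = [1, 3, 0, 2]
rendered = ['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board]
assert rendered == ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']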
'''simple docstring''' from __future__ import annotations import math class a__: def __init__( self : List[str] , __snake_case : int ): a : str = size # approximate the overall size of segment tree with given value a : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update a : Any = [0 for i in range(0 , 4 * size )] a : Dict = [0 for i in range(0 , 4 * size )] # flag for lazy update def lowercase_ ( self : int , __snake_case : int ): return idx * 2 def lowercase_ ( self : Dict , __snake_case : int ): return idx * 2 + 1 def lowercase_ ( self : Dict , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] ): if left_element == right_element: a : Tuple = a[left_element - 1] else: a : Tuple = (left_element + right_element) // 2 self.build(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case ) self.build(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case ) a : Union[str, Any] = max( self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] ) def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ): if self.flag[idx] is True: a : int = self.lazy[idx] a : Union[str, Any] = False if left_element != right_element: a : Dict = self.lazy[idx] a : int = self.lazy[idx] a : Tuple = True a : Optional[Any] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: a : int = val if left_element != right_element: a : int = val a : Dict = val a : List[str] = True a : List[str] = True return True a : Tuple = (left_element + right_element) // 2 self.update(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) self.update(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case , __snake_case ) a : Optional[int] = max( self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] ) return True def lowercase_ ( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ): if self.flag[idx] is True: a : str = self.lazy[idx] a : Optional[Any] = False if left_element != right_element: a : Dict = self.lazy[idx] a : Union[str, Any] = self.lazy[idx] a : Dict = True a : int = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] a : Dict = (left_element + right_element) // 2 a : Optional[int] = self.query(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case ) a : Union[str, Any] = self.query(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case ) return max(__snake_case , __snake_case ) def __str__( self : Any ): return str([self.query(1 , 1 , self.size , __snake_case , __snake_case ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": lowerCAmelCase: Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] lowerCAmelCase: int = 1_5 lowerCAmelCase: Optional[int] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
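Traced by hand, the demo above should behave as follows (1-indexed, inclusive ranges over A):

# max(A[4..6])  = max(7, 3, -5)            -> 7
# max(A[7..11]) = max(6, 11, -20, 9, 14)   -> 14
# max(A[7..12]) also covers A[12] = 15     -> 15
# after the lazy range assignment A[1..3] = 111: max(A[1..15]) -> 111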
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration lowerCAmelCase: Any = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] lowerCAmelCase: Union[str, Any] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] lowerCAmelCase: List[str] = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) lowerCAmelCase: Tuple = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) lowerCAmelCase: Optional[Any] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def lowerCamelCase__ ( _A , _A ): for tf_name, hf_name in patterns: a : List[str] = k.replace(_A , _A ) return k def lowerCamelCase__ ( _A , _A ): a : Optional[Any] = BigBirdPegasusConfig(**_A ) a : str = BigBirdPegasusForConditionalGeneration(_A ) a : List[Any] = torch_model.state_dict() a : str = {} # separating decoder weights a : int = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )} a : List[Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )} for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ): a : Any = [k.endswith(_A ) for ending in KEYS_TO_IGNORE] if any(_A ): continue a : List[str] = DECODER_PATTERNS a : List[str] = rename_state_dict_key(_A , _A ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): a : Optional[Any] = v.T a : Dict = torch.from_numpy(_A ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ): a : Any = [k.endswith(_A ) for ending in KEYS_TO_IGNORE] if any(_A ): continue a : Tuple = REMAINING_PATTERNS a : int = rename_state_dict_key(_A , _A ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ): a : Optional[int] = v.T a : Any = torch.from_numpy(_A ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" a : Union[str, Any] = mapping['model.embed_positions.weight'] a : Union[str, Any] = mapping.pop('model.embed_positions.weight' ) a , a : List[str] = torch_model.load_state_dict(_A , strict=_A ) a : int = [ k for k in missing if k not in [ 'final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def lowerCamelCase__ ( _A ): a : List[Any] = tf.train.list_variables(_A ) a : Optional[Any] = {} a : Any = ['global_step'] for name, shape in tqdm(_A , desc='converting tf checkpoint to dict' ): a : Optional[Any] = any(pat in name for pat in ignore_name ) if skip_key: continue a : str = tf.train.load_variable(_A , _A ) a : Optional[Any] = array return tf_weights def lowerCamelCase__ ( _A , _A , _A ): a : Tuple = get_tf_weights_as_numpy(_A ) a : List[Any] = convert_bigbird_pegasus(_A , _A ) torch_model.save_pretrained(_A ) if __name__ == "__main__": lowerCAmelCase: Any = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') lowerCAmelCase: List[Any] = parser.parse_args() lowerCAmelCase: List[Any] = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
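Applied in order, the substitution patterns translate a TensorFlow variable name into the matching Hugging Face key. A worked instance of the decoder mapping (the input key is illustrative, and this assumes the renaming helper keeps the name used at its call sites, rename_state_dict_key):

example_key = 'pegasus/decoder/layer_0/attention/self/query/kernel'
assert rename_state_dict_key(example_key, DECODER_PATTERNS) == (
    'model.decoder.layers.0.self_attn.q_proj.weight'
)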
'''simple docstring'''


def add(first: int, second: int) -> int:
    while second != 0:
        # XOR sums the bits without carry; AND picks out the carry bits,
        # which are re-added one position to the left until none remain.
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f"{add(first, second) = }")
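A trace of the carry loop for add(5, 3). Note that with Python's unbounded integers this terminates for non-negative inputs, while a negative operand would carry forever:

# first = 5 (0b101), second = 3 (0b011): c = 0b001, first -> 0b110,  second -> 0b010
# first = 6 (0b110), second = 2 (0b010): c = 0b010, first -> 0b100,  second -> 0b100
# first = 4 (0b100), second = 4 (0b100): c = 0b100, first -> 0b000,  second -> 0b1000
# first = 0,         second = 8:         c = 0,     first -> 0b1000, second -> 0   => 8
assert add(5, 3) == 8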
'''simple docstring''' from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowerCamelCase__ ( _A , _A , _A = 1 / sqrt(2 ) ): a : Union[str, Any] = tau * frequency / samplerate a : List[Any] = sin(_A ) a : Optional[Any] = cos(_A ) a : str = _sin / (2 * q_factor) a : List[str] = (1 - _cos) / 2 a : int = 1 - _cos a : Tuple = 1 + alpha a : List[Any] = -2 * _cos a : str = 1 - alpha a : Any = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _A , _A , _A = 1 / sqrt(2 ) ): a : str = tau * frequency / samplerate a : int = sin(_A ) a : int = cos(_A ) a : List[Any] = _sin / (2 * q_factor) a : Union[str, Any] = (1 + _cos) / 2 a : Tuple = -1 - _cos a : Union[str, Any] = 1 + alpha a : List[str] = -2 * _cos a : Union[str, Any] = 1 - alpha a : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _A , _A , _A = 1 / sqrt(2 ) ): a : Optional[Any] = tau * frequency / samplerate a : str = sin(_A ) a : Dict = cos(_A ) a : int = _sin / (2 * q_factor) a : List[Any] = _sin / 2 a : Union[str, Any] = 0 a : Optional[int] = -ba a : Any = 1 + alpha a : Tuple = -2 * _cos a : Tuple = 1 - alpha a : Optional[int] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _A , _A , _A = 1 / sqrt(2 ) ): a : Any = tau * frequency / samplerate a : List[Any] = sin(_A ) a : Any = cos(_A ) a : List[Any] = _sin / (2 * q_factor) a : str = 1 - alpha a : int = -2 * _cos a : int = 1 + alpha a : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _A , _A , _A , _A = 1 / sqrt(2 ) , ): a : List[Any] = tau * frequency / samplerate a : List[Any] = sin(_A ) a : List[Any] = cos(_A ) a : Optional[int] = _sin / (2 * q_factor) a : str = 10 ** (gain_db / 40) a : List[Any] = 1 + alpha * big_a a : List[str] = -2 * _cos a : List[Any] = 1 - alpha * big_a a : Any = 1 + alpha / big_a a : Tuple = -2 * _cos a : Dict = 1 - alpha / big_a a : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _A , _A , _A , _A = 1 / sqrt(2 ) , ): a : Union[str, Any] = tau * frequency / samplerate a : Dict = sin(_A ) a : Optional[Any] = cos(_A ) a : Any = _sin / (2 * q_factor) a : List[Any] = 10 ** (gain_db / 40) a : List[Any] = (big_a + 1) - (big_a - 1) * _cos a : Tuple = (big_a + 1) + (big_a - 1) * _cos a : Dict = (big_a - 1) - (big_a + 1) * _cos a : Any = (big_a - 1) + (big_a + 1) * _cos a : str = 2 * sqrt(_A ) * alpha a : Union[str, Any] = big_a * (pmc + aaa) a : Optional[int] = 2 * big_a * mpc a : Dict = big_a * (pmc - aaa) a : Tuple = ppmc + aaa a : Tuple = -2 * pmpc a : Optional[int] = ppmc - aaa a : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( _A , _A , _A , _A = 1 / sqrt(2 ) , ): a : List[str] = tau * frequency / samplerate a : Union[str, Any] = sin(_A ) a : int = cos(_A ) a : int = _sin / (2 * q_factor) a : List[Any] = 10 ** (gain_db / 40) a : Tuple = (big_a + 1) - (big_a - 1) * _cos a : List[Any] = (big_a + 1) + (big_a - 1) * _cos a : List[str] = (big_a - 1) - (big_a + 1) * _cos a : Dict = (big_a - 1) + (big_a + 1) * _cos a : Union[str, Any] = 2 * sqrt(_A ) * alpha a : Tuple = big_a * (ppmc + aaa) a : Union[str, Any] = -2 * big_a * pmpc a : int = big_a * (ppmc - aaa) a : Tuple = pmc + aaa a : List[str] = 2 * mpc a : Union[str, Any] = pmc - aaa a : Optional[Any] = IIRFilter(2 ) 
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
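The coefficient formulas above follow the standard audio-EQ-cookbook biquads. As a standalone sanity check (a sketch that recomputes the low-pass coefficients rather than calling the factories), the DC gain of the low-pass section should be exactly one:

from math import cos, sin, sqrt, tau

frequency, samplerate, q_factor = 1_000, 48_000, 1 / sqrt(2)
w0 = tau * frequency / samplerate
alpha = sin(w0) / (2 * q_factor)
b0 = (1 - cos(w0)) / 2        # feed-forward taps (b2 equals b0 for the low-pass)
b1 = 1 - cos(w0)
a0 = 1 + alpha                # feedback taps; a0 normalizes the section
a1 = -2 * cos(w0)
a2 = 1 - alpha
# A biquad's DC gain is (b0 + b1 + b2) / (a0 + a1 + a2); for the low-pass it is 1.
assert abs((b0 + b1 + b0) / (a0 + a1 + a2) - 1.0) < 1e-12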
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( _A , _A ): assert isinstance(_A , _A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase__ ( _A , _A , _A ): a : str = tmp_path / 'cache' a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : str = tmp_path / 'cache' a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Dict = features.copy() if features else default_expected_features a : Union[str, Any] = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : Tuple = tmp_path / 'cache' a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} a : Optional[int] = features.copy() if features else default_expected_features a : Dict = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowerCamelCase__ ( _A , _A ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} a : int = features.copy() a : List[Any] = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Dict = tmp_path / 'cache' a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read() 
_check_json_dataset(_A , _A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowerCamelCase__ ( _A , _A , _A ): if issubclass(_A , _A ): a : Optional[int] = jsonl_path elif issubclass(_A , _A ): a : Optional[int] = [jsonl_path] a : List[str] = tmp_path / 'cache' a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) def lowerCamelCase__ ( _A , _A , _A=("train",) ): assert isinstance(_A , _A ) for split in splits: a : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : List[Any] = features.copy() if features else default_expected_features a : Any = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase__ ( _A , _A , _A ): if split: a : Any = {split: jsonl_path} else: a : List[Any] = 'train' a : List[str] = {'train': jsonl_path, 'test': jsonl_path} a : List[Any] = tmp_path / 'cache' a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( _A ): return json.load(_A ) def lowerCamelCase__ ( _A ): return [json.loads(_A ) for line in buffer] class a__: @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write() buffer.seek(0 ) a : List[str] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, 
{'schema', 'data'}, 'data'), ] , ) def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write() buffer.seek(0 ) a : int = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write() buffer.seek(0 ) a : List[Any] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write() buffer.seek(0 ) a : int = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 def lowercase_ ( self : List[str] , __snake_case : str ): with pytest.raises(__snake_case ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 ) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ): a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}""" a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" ) JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write() with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f: a : Union[str, Any] = f.read() with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f: a : Union[str, Any] = f.read() assert exported_content == original_content
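# --- usage sketch (added) ---
# A minimal round trip through the reader/writer classes the tests above
# exercise, assuming the same `datasets.io.json` entry points; the file name
# "example.jsonl" is hypothetical.
from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
JsonDatasetWriter(ds, "example.jsonl", lines=True).write()  # one JSON object per line
reloaded = JsonDatasetReader("example.jsonl", cache_dir="cache").read()
assert reloaded.column_names == ["col_1", "col_2", "col_3"]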
'''simple docstring'''


def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) via merge sort; returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    p, inversion_p = count_inversions_recursive(p)
    q, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(p, q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversions (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
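# --- worked example (added) ---
# [3, 1, 2] has exactly two inversions, (3, 1) and (3, 2); the O(n^2) and the
# O(n log n) counters above must agree, and the recursive variant also returns
# the sorted list.
example = [3, 1, 2]
assert count_inversions_bf(example) == 2
assert count_inversions_recursive(example) == ([1, 2, 3], 2)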
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowerCamelCase__ ( _A = "laptop" ): a : Any = f"""https://www.amazon.in/laptop/s?k={product}""" a : Tuple = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5', } a : Any = BeautifulSoup(requests.get(_A , headers=_A ).text ) # Initialize a Pandas dataframe with the column titles a : Any = DataFrame( columns=[ 'Product Title', 'Product Link', 'Current Price of the product', 'Product Rating', 'MRP of the product', 'Discount', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( 'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ): try: a : Optional[int] = item.ha.text a : str = 'https://www.amazon.in/' + item.ha.a['href'] a : List[str] = item.find('span' , attrs={'class': 'a-offscreen'} ).text try: a : Optional[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text except AttributeError: a : Union[str, Any] = 'Not available' try: a : str = ( '₹' + item.find( 'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1] ) except AttributeError: a : int = '' try: a : Union[str, Any] = float( ( ( float(product_mrp.strip('₹' ).replace(',' , '' ) ) - float(product_price.strip('₹' ).replace(',' , '' ) ) ) / float(product_mrp.strip('₹' ).replace(',' , '' ) ) ) * 100 ) except ValueError: a : Any = float('nan' ) except AttributeError: pass a : Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] a : Any = ' ' a : List[str] = ' ' data_frame.index += 1 return data_frame if __name__ == "__main__": lowerCAmelCase: str = 'headphones' get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowerCAmelCase: str = logging.get_logger(__name__) lowerCAmelCase: int = TypeVar('DatasetType', Dataset, IterableDataset) def lowerCamelCase__ ( _A , _A = None , _A = None , _A = None , _A = None , _A = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(_A ): if not isinstance(_A , (Dataset, IterableDataset) ): if isinstance(_A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( f"""Dataset at position {i} has at least one split: {list(_A )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_A ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}.""" ) if i == 0: a , a : Optional[Any] = ( (Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset) ) elif not isinstance(_A , _A ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( _A , _A , _A , info=_A , split=_A , stopping_strategy=_A ) else: return _interleave_iterable_datasets( _A , _A , _A , info=_A , split=_A , stopping_strategy=_A ) def lowerCamelCase__ ( _A , _A = None , _A = None , _A = 0 , ): if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(_A ): if not isinstance(_A , (Dataset, IterableDataset) ): if isinstance(_A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( f"""Dataset at position {i} has at least one split: {list(_A )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_A ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}.""" ) if i == 0: a , a : Optional[Any] = ( (Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset) ) elif not isinstance(_A , _A ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_A , info=_A , split=_A , axis=_A ) else: return _concatenate_iterable_datasets(_A , info=_A , split=_A , axis=_A )
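# --- usage sketch (added) ---
# The two dispatchers above back the public `interleave_datasets` and
# `concatenate_datasets` helpers; a minimal sketch assuming those re-exports.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"n": [0, 1, 2]})
d2 = Dataset.from_dict({"n": [10, 11, 12]})

mixed = interleave_datasets([d1, d2])  # round-robin until "first_exhausted"
assert mixed["n"] == [0, 10, 1, 11, 2, 12]

stacked = concatenate_datasets([d1, d2])  # rows appended along axis=0
assert stacked["n"] == [0, 1, 2, 10, 11, 12]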
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = StableUnCLIPImgaImgPipeline lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowercase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase__ = frozenset([] ) def lowercase_ ( self : int ): a : Dict = 32 a : str = embedder_hidden_size # image encoding components a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) a : Dict = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case ) a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' ) torch.manual_seed(0 ) a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) torch.manual_seed(0 ) a : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) a : Union[str, Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , ) torch.manual_seed(0 ) a : List[Any] = DDIMScheduler( beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , ) torch.manual_seed(0 ) a : List[str] = AutoencoderKL() a : str = { # image encoding components 'feature_extractor': feature_extractor, 'image_encoder': image_encoder.eval(), # image noising components 'image_normalizer': image_normalizer.eval(), 'image_noising_scheduler': image_noising_scheduler, # regular denoising components 'tokenizer': tokenizer, 'text_encoder': text_encoder.eval(), 'unet': unet.eval(), 'scheduler': 
scheduler, 'vae': vae.eval(), } return components def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ): if str(__snake_case ).startswith('mps' ): a : Tuple = torch.manual_seed(__snake_case ) else: a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case ) if pil_image: a : Optional[Any] = input_image * 0.5 + 0.5 a : Optional[Any] = input_image.clamp(0 , 1 ) a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def lowercase_ ( self : Optional[Any] ): a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Union[str, Any] = self.get_dummy_components() a : Any = StableUnCLIPImgaImgPipeline(**__snake_case ) a : Tuple = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) a : Union[str, Any] = self.get_dummy_inputs(__snake_case ) inputs.update({'image_embeds': None} ) a : str = sd_pipe(**__snake_case ).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self : List[str] ): a : int = torch_device in ['cpu', 'mps'] self._test_attention_slicing_forward_pass(test_max_difference=__snake_case ) def lowercase_ ( self : int ): a : Optional[int] = torch_device in ['cpu', 'mps'] self._test_inference_batch_single_identical(test_max_difference=__snake_case ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowercase_ ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case ) @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Optional[Any] ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' ) a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Optional[int] ): a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Any = 
load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' ) a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Any ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) a : Optional[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = pipe( __snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
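# --- usage sketch (added) ---
# Condensed from the slow tests above: load the img2img checkpoint and run an
# init image plus a prompt through it. In diffusers proper the class is spelled
# StableUnCLIPImg2ImgPipeline; the checkpoint id comes from the tests.
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
pipe.enable_sequential_cpu_offload()  # memory saving, as in the tests

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, "anime turtle", num_inference_steps=20).images[0]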
'''simple docstring'''
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase: List[str] = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class a__( lowerCamelCase__ ): lowercase__ = """t5""" lowercase__ = ["""past_key_values"""] lowercase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Union[str, Any] , __snake_case : int=3_21_28 , __snake_case : str=5_12 , __snake_case : Dict=64 , __snake_case : Optional[int]=20_48 , __snake_case : Tuple=6 , __snake_case : Any=None , __snake_case : Optional[int]=8 , __snake_case : str=32 , __snake_case : Union[str, Any]=1_28 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=1e-6 , __snake_case : int=1.0 , __snake_case : Optional[int]="relu" , __snake_case : Any=True , __snake_case : List[str]=True , __snake_case : Union[str, Any]=0 , __snake_case : Dict=1 , **__snake_case : Optional[int] , ): a : Optional[int] = vocab_size a : Dict = d_model a : Union[str, Any] = d_kv a : Dict = d_ff a : Tuple = num_layers a : Dict = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a : int = num_heads a : str = relative_attention_num_buckets a : List[Any] = relative_attention_max_distance a : int = dropout_rate a : Tuple = layer_norm_epsilon a : str = initializer_factor a : List[Any] = feed_forward_proj a : Union[str, Any] = use_cache a : List[str] = self.feed_forward_proj.split('-' ) a : int = act_info[-1] a : Union[str, Any] = act_info[0] == 'gated' if len(__snake_case ) > 1 and act_info[0] != "gated" or len(__snake_case ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a : Optional[Any] = 'gelu_new' super().__init__( pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , **__snake_case , ) class a__( lowerCamelCase__ ): @property def lowercase_ ( self : Optional[int] ): a : Dict = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: a : Dict = 'past_encoder_sequence + sequence' a : Dict = {0: 'batch'} a : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: a : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'} a : List[str] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction='inputs' ) return common_inputs @property def lowercase_ ( self : List[Any] ): return 13
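# --- usage sketch (added) ---
# The attribute_map above aliases hidden_size -> d_model and so on, so generic
# code can read either name; a minimal check using the real class name, T5Config.
from transformers import T5Config

cfg = T5Config(d_model=256, num_layers=4, num_heads=4)
assert cfg.hidden_size == 256       # resolved through the attribute_map
assert cfg.num_hidden_layers == 4   # alias for num_layers
assert cfg.num_decoder_layers == 4  # defaults to num_layers ("symmetry")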
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class a__( unittest.TestCase ): def lowercase_ ( self : Dict ): a : Any = { 'task_specific_params': { 'summarization': {'length_penalty': 1.0, 'max_length': 1_28, 'min_length': 12, 'num_beams': 4}, 'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_42, 'min_length': 56, 'num_beams': 4}, 'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6}, } } a : str = { 'task_specific_params.summarization.length_penalty': 1.0, 'task_specific_params.summarization.max_length': 1_28, 'task_specific_params.summarization.min_length': 12, 'task_specific_params.summarization.num_beams': 4, 'task_specific_params.summarization_cnn.length_penalty': 2.0, 'task_specific_params.summarization_cnn.max_length': 1_42, 'task_specific_params.summarization_cnn.min_length': 56, 'task_specific_params.summarization_cnn.num_beams': 4, 'task_specific_params.summarization_xsum.length_penalty': 1.0, 'task_specific_params.summarization_xsum.max_length': 62, 'task_specific_params.summarization_xsum.min_length': 11, 'task_specific_params.summarization_xsum.num_beams': 6, } self.assertEqual(flatten_dict(__snake_case ) , __snake_case ) def lowercase_ ( self : int ): a : Dict = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__snake_case ) , x.transpose() ) ) a : List[str] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowercase_ ( self : Union[str, Any] ): a : Any = np.random.randn(3 , 4 ) a : List[Any] = torch.tensor(__snake_case ) self.assertTrue(np.allclose(transpose(__snake_case ) , transpose(__snake_case ).numpy() ) ) a : Dict = np.random.randn(3 , 4 , 5 ) a : List[str] = torch.tensor(__snake_case ) self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , transpose(__snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowercase_ ( self : Optional[Any] ): a : Any = np.random.randn(3 , 4 ) a : Dict = tf.constant(__snake_case ) self.assertTrue(np.allclose(transpose(__snake_case ) , transpose(__snake_case ).numpy() ) ) a : Dict = np.random.randn(3 , 4 , 5 ) a : str = tf.constant(__snake_case ) self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , transpose(__snake_case , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowercase_ ( self : Optional[int] ): a : Optional[int] = np.random.randn(3 , 4 ) a : Tuple = jnp.array(__snake_case ) self.assertTrue(np.allclose(transpose(__snake_case ) , np.asarray(transpose(__snake_case ) ) ) ) a : int = np.random.randn(3 , 4 , 5 ) a : Any = jnp.array(__snake_case ) self.assertTrue(np.allclose(transpose(__snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(__snake_case , axes=(1, 2, 0) ) ) ) ) def lowercase_ ( self : Optional[Any] ): a : int = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , np.reshape(__snake_case , (4, 3) ) ) ) a : List[Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , np.reshape(__snake_case , (12, 5) ) ) ) @require_torch def lowercase_ ( self : Tuple ): a : str = np.random.randn(3 , 
4 ) a : Any = torch.tensor(__snake_case ) self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , reshape(__snake_case , (4, 3) ).numpy() ) ) a : str = np.random.randn(3 , 4 , 5 ) a : Dict = torch.tensor(__snake_case ) self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , reshape(__snake_case , (12, 5) ).numpy() ) ) @require_tf def lowercase_ ( self : List[Any] ): a : str = np.random.randn(3 , 4 ) a : List[Any] = tf.constant(__snake_case ) self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , reshape(__snake_case , (4, 3) ).numpy() ) ) a : Optional[int] = np.random.randn(3 , 4 , 5 ) a : List[str] = tf.constant(__snake_case ) self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , reshape(__snake_case , (12, 5) ).numpy() ) ) @require_flax def lowercase_ ( self : str ): a : Union[str, Any] = np.random.randn(3 , 4 ) a : Optional[Any] = jnp.array(__snake_case ) self.assertTrue(np.allclose(reshape(__snake_case , (4, 3) ) , np.asarray(reshape(__snake_case , (4, 3) ) ) ) ) a : List[Any] = np.random.randn(3 , 4 , 5 ) a : Optional[int] = jnp.array(__snake_case ) self.assertTrue(np.allclose(reshape(__snake_case , (12, 5) ) , np.asarray(reshape(__snake_case , (12, 5) ) ) ) ) def lowercase_ ( self : Tuple ): a : Union[str, Any] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__snake_case ) , np.squeeze(__snake_case ) ) ) a : Any = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , np.squeeze(__snake_case , axis=2 ) ) ) @require_torch def lowercase_ ( self : Union[str, Any] ): a : int = np.random.randn(1 , 3 , 4 ) a : Union[str, Any] = torch.tensor(__snake_case ) self.assertTrue(np.allclose(squeeze(__snake_case ) , squeeze(__snake_case ).numpy() ) ) a : str = np.random.randn(1 , 4 , 1 , 5 ) a : str = torch.tensor(__snake_case ) self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , squeeze(__snake_case , axis=2 ).numpy() ) ) @require_tf def lowercase_ ( self : Dict ): a : int = np.random.randn(1 , 3 , 4 ) a : Optional[Any] = tf.constant(__snake_case ) self.assertTrue(np.allclose(squeeze(__snake_case ) , squeeze(__snake_case ).numpy() ) ) a : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 ) a : int = tf.constant(__snake_case ) self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , squeeze(__snake_case , axis=2 ).numpy() ) ) @require_flax def lowercase_ ( self : Tuple ): a : Tuple = np.random.randn(1 , 3 , 4 ) a : Dict = jnp.array(__snake_case ) self.assertTrue(np.allclose(squeeze(__snake_case ) , np.asarray(squeeze(__snake_case ) ) ) ) a : Optional[int] = np.random.randn(1 , 4 , 1 , 5 ) a : Union[str, Any] = jnp.array(__snake_case ) self.assertTrue(np.allclose(squeeze(__snake_case , axis=2 ) , np.asarray(squeeze(__snake_case , axis=2 ) ) ) ) def lowercase_ ( self : Any ): a : str = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , np.expand_dims(__snake_case , axis=1 ) ) ) @require_torch def lowercase_ ( self : Tuple ): a : Dict = np.random.randn(3 , 4 ) a : List[str] = torch.tensor(__snake_case ) self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , expand_dims(__snake_case , axis=1 ).numpy() ) ) @require_tf def lowercase_ ( self : int ): a : Any = np.random.randn(3 , 4 ) a : List[str] = tf.constant(__snake_case ) self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , expand_dims(__snake_case , axis=1 ).numpy() ) ) @require_flax def lowercase_ ( self : int ): a : str = np.random.randn(3 , 4 ) a : Optional[Any] = jnp.array(__snake_case ) 
self.assertTrue(np.allclose(expand_dims(__snake_case , axis=1 ) , np.asarray(expand_dims(__snake_case , axis=1 ) ) ) )
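# --- usage sketch (added) ---
# The helpers under test dispatch on the tensor's framework, so one call works
# for NumPy, PyTorch, TensorFlow and JAX inputs alike. NumPy-only sketch:
import numpy as np
from transformers.utils import expand_dims, reshape, squeeze, transpose

x = np.random.randn(1, 3, 4)
assert transpose(x).shape == (4, 3, 1)        # axes reversed by default
assert reshape(x, (3, 4)).shape == (3, 4)
assert squeeze(x).shape == (3, 4)             # drops the size-1 axis
assert expand_dims(x, axis=0).shape == (1, 1, 3, 4)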
'''simple docstring'''
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
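# --- worked example (added) ---
# A 10 mH inductor with a 100 nF capacitor: f = 1 / (2*pi*sqrt(L*C)) ≈ 5032.9 Hz.
label, freq = resonant_frequency(inductance=0.01, capacitance=100e-9)
print(label, round(freq, 1))  # Resonant frequency 5032.9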
'''simple docstring''' from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a__( lowerCamelCase__ ): lowercase__ = ["""image_processor""", """tokenizer"""] lowercase__ = """AutoImageProcessor""" lowercase__ = """AutoTokenizer""" def __init__( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[Any] ): super().__init__(__snake_case , __snake_case ) a : Optional[int] = self.image_processor def __call__( self : Optional[int] , __snake_case : List[str]=None , __snake_case : Dict=None , __snake_case : int=None , **__snake_case : int ): if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: a : int = self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case ) if images is not None: a : str = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case ) if text is not None and images is not None: a : Tuple = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case ) def lowercase_ ( self : Union[str, Any] , *__snake_case : Tuple , **__snake_case : List[str] ): return self.tokenizer.batch_decode(*__snake_case , **__snake_case ) def lowercase_ ( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : int ): return self.tokenizer.decode(*__snake_case , **__snake_case ) @property def lowercase_ ( self : List[str] ): return ["input_ids", "attention_mask", "pixel_values"]
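# --- usage sketch (added) ---
# The processor above simply fans text out to the tokenizer and images out to
# the image processor, then merges the results. The class and checkpoint names
# below are assumptions; in transformers this pattern matches
# VisionTextDualEncoderProcessor.
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer, VisionTextDualEncoderProcessor

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
processor = VisionTextDualEncoderProcessor(image_processor, tokenizer)

image = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
# inputs now carries input_ids, attention_mask and pixel_values together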
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase: Any = logging.get_logger(__name__) lowerCAmelCase: Any = {'vocab_file': 'vocab.txt'} lowerCAmelCase: List[Any] = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCAmelCase: str = { 'openbmb/cpm-ant-10b': 1_0_2_4, } def lowerCamelCase__ ( _A ): a : Union[str, Any] = collections.OrderedDict() with open(_A , 'r' , encoding='utf-8' ) as reader: a : int = reader.readlines() for index, token in enumerate(_A ): a : int = token.rstrip('\n' ) a : List[Any] = index return vocab class a__( lowerCamelCase__ ): def __init__( self : Dict , __snake_case : Any , __snake_case : Dict="<unk>" , __snake_case : str=2_00 ): a : List[Any] = vocab a : Any = unk_token a : List[str] = max_input_chars_per_word def lowercase_ ( self : Optional[int] , __snake_case : Union[str, Any] ): a : Optional[Any] = list(__snake_case ) if len(__snake_case ) > self.max_input_chars_per_word: return [self.unk_token] a : Any = 0 a : Optional[Any] = [] while start < len(__snake_case ): a : Optional[int] = len(__snake_case ) a : str = None while start < end: a : Optional[Any] = ''.join(chars[start:end] ) if substr in self.vocab: a : List[str] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__snake_case ) a : List[str] = end return sub_tokens class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["""input_ids""", """attention_mask"""] lowercase__ = False def __init__( self : Any , __snake_case : str , __snake_case : Tuple="<d>" , __snake_case : List[str]="</d>" , __snake_case : Dict="<s>" , __snake_case : List[Any]="</s>" , __snake_case : int="<pad>" , __snake_case : Any="<unk>" , __snake_case : List[str]="</n>" , __snake_case : int="</_>" , __snake_case : Optional[Any]="left" , **__snake_case : Dict , ): requires_backends(self , ['jieba'] ) super().__init__( bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , ) a : Union[str, Any] = bod_token a : Any = eod_token a : List[str] = load_vocab(__snake_case ) a : Optional[int] = self.encoder[space_token] a : str = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] a : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) a : Tuple = {v: k for k, v in self.encoder.items()} a : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowercase_ ( self : Optional[int] ): return self.encoder[self.bod_token] @property def lowercase_ ( self : Dict ): return self.encoder[self.eod_token] @property def lowercase_ ( self : Any ): return self.encoder["\n"] @property def lowercase_ ( self : Tuple ): return len(self.encoder ) def lowercase_ ( self : str ): return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ): a : List[str] = [] for x in jieba.cut(__snake_case , cut_all=__snake_case ): 
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) ) return output_tokens def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , **__snake_case : Optional[Any] ): a : Optional[int] = [i for i in token_ids if i >= 0] a : Any = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__snake_case , **__snake_case ) def lowercase_ ( self : Optional[int] , __snake_case : int ): return token in self.encoder def lowercase_ ( self : int , __snake_case : List[str] ): return "".join(__snake_case ) def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] ): return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) ) def lowercase_ ( self : Tuple , __snake_case : List[str] ): return self.decoder.get(__snake_case , self.unk_token ) def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ): if os.path.isdir(__snake_case ): a : Optional[int] = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: a : int = (filename_prefix + '-' if filename_prefix else '') + save_directory a : Any = 0 if " " in self.encoder: a : Union[str, Any] = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: a : Tuple = self.encoder['\n'] del self.encoder["\n"] a : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) with open(__snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' ) a : List[Any] = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def lowercase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : List[int] = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowercase_ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) if token_ids_a is not None: return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) return [1] + ([0] * len(__snake_case ))
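# --- usage sketch (added) ---
# The tokenizer above segments with jieba before WordPiece; the public class in
# transformers is CpmAntTokenizer, and the checkpoint id comes from the
# pretrained map at the top of the file. Requires `pip install jieba`.
from transformers import CpmAntTokenizer

tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
ids = tokenizer.encode("今天天气真好!")
print(tokenizer.decode(ids))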
'''simple docstring'''
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume between the metric units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
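# --- worked example (added) ---
# 4 cubic metres in litres (factor 1000), and one US gallon back to cubic metres.
print(volume_conversion(4, "cubicmeter", "litre"))   # 4000
print(volume_conversion(1, "gallon", "cubicmeter"))  # 0.00454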
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class a__( unittest.TestCase ): @slow def lowercase_ ( self : List[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[int] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Any = TFAutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) 
self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Any ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[int] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : str = AutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , 
__snake_case ) a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def lowercase_ ( self : Tuple ): a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) def lowercase_ ( self : Any ): a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
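# --- usage sketch (added) ---
# The cross-framework loading these tests exercise, reduced to two lines:
# `from_pt=True` converts PyTorch weights while loading into TF, and `from_tf`
# does the reverse (the checkpoint must ship weights for the source framework).
from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)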
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class a__( unittest.TestCase ): def lowercase_ ( self : int ): a : List[str] = tempfile.mkdtemp() a : List[str] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) a : List[Any] = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48145466, 0.4578275, 0.40821073], 'image_std': [0.26862954, 0.26130258, 0.27577711], } a : Optional[int] = os.path.join(self.tmpdirname , __snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__snake_case , __snake_case ) def lowercase_ ( self : str , **__snake_case : Dict ): return BertTokenizer.from_pretrained(self.tmpdirname , **__snake_case ) def lowercase_ ( self : Dict , **__snake_case : str ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case ) def lowercase_ ( self : Dict , **__snake_case : Union[str, Any] ): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__snake_case ) def lowercase_ ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : int ): a : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] a : Dict = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase_ ( self : Optional[int] ): a : int = self.get_tokenizer() a : Optional[Any] = self.get_rust_tokenizer() a : Optional[Any] = self.get_image_processor() a : Any = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case ) processor_slow.save_pretrained(self.tmpdirname ) a : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__snake_case ) a : List[Any] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case ) processor_fast.save_pretrained(self.tmpdirname ) a : Tuple = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __snake_case ) self.assertIsInstance(processor_fast.tokenizer , __snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __snake_case ) self.assertIsInstance(processor_fast.image_processor , __snake_case ) def lowercase_ ( self : Union[str, Any] ): a : int = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a 
: Dict = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) a : str = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 ) a : Union[str, Any] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__snake_case , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __snake_case ) def lowercase_ ( self : Optional[int] ): a : int = self.get_image_processor() a : str = self.get_tokenizer() a : List[str] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : List[Any] = self.prepare_image_inputs() a : str = image_processor(__snake_case , return_tensors='np' ) a : Union[str, Any] = processor(images=__snake_case , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Tuple ): a : Union[str, Any] = self.get_image_processor() a : Tuple = self.get_tokenizer() a : Union[str, Any] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : Optional[Any] = 'lower newer' a : List[Any] = processor(text=__snake_case ) a : Union[str, Any] = tokenizer(__snake_case , padding='max_length' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Union[str, Any] ): a : Optional[int] = self.get_image_processor() a : Any = self.get_tokenizer() a : List[str] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : Union[str, Any] = 'lower newer' a : List[Any] = self.prepare_image_inputs() a : str = processor(text=__snake_case , images=__snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__snake_case ): processor() def lowercase_ ( self : int ): a : Union[str, Any] = self.get_image_processor() a : Any = self.get_tokenizer() a : Tuple = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a : Dict = processor.batch_decode(__snake_case ) a : Tuple = tokenizer.batch_decode(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) def lowercase_ ( self : Dict ): a : Any = self.get_image_processor() a : Any = self.get_tokenizer() a : Optional[Any] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : List[str] = 'lower newer' a : Any = self.prepare_image_inputs() a : Dict = processor(text=__snake_case , images=__snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
297
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase: List[Any] = logging.get_logger(__name__) lowerCAmelCase: List[Any] = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class a__( lowerCamelCase__ ): lowercase__ = """roberta""" def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : int=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Dict=12 , __snake_case : Tuple=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : str=5_12 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : int=1e-1_2 , __snake_case : str=1 , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=None , **__snake_case : str , ): super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case ) a : List[str] = vocab_size a : str = hidden_size a : Tuple = num_hidden_layers a : Dict = num_attention_heads a : List[Any] = hidden_act a : str = intermediate_size a : Union[str, Any] = hidden_dropout_prob a : Optional[Any] = attention_probs_dropout_prob a : Any = max_position_embeddings a : Optional[int] = type_vocab_size a : str = initializer_range a : List[Any] = layer_norm_eps a : Optional[int] = position_embedding_type a : Dict = use_cache a : Any = classifier_dropout class a__( lowerCamelCase__ ): @property def lowercase_ ( self : int ): if self.task == "multiple-choice": a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a : str = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
297
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase: Dict = { 'configuration_mobilebert': [ 'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileBertConfig', 'MobileBertOnnxConfig', ], 'tokenization_mobilebert': ['MobileBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Tuple = ['MobileBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: int = [ 'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileBertForMaskedLM', 'MobileBertForMultipleChoice', 'MobileBertForNextSentencePrediction', 'MobileBertForPreTraining', 'MobileBertForQuestionAnswering', 'MobileBertForSequenceClassification', 'MobileBertForTokenClassification', 'MobileBertLayer', 'MobileBertModel', 'MobileBertPreTrainedModel', 'load_tf_weights_in_mobilebert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: List[Any] = [ 'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileBertForMaskedLM', 'TFMobileBertForMultipleChoice', 'TFMobileBertForNextSentencePrediction', 'TFMobileBertForPreTraining', 'TFMobileBertForQuestionAnswering', 'TFMobileBertForSequenceClassification', 'TFMobileBertForTokenClassification', 'TFMobileBertMainLayer', 'TFMobileBertModel', 'TFMobileBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys lowerCAmelCase: str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
'''simple docstring'''


def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a root can only lie between a and b if the signs differ
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
297
1
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase: str = logging.get_logger(__name__) lowerCAmelCase: Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase: Any = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } lowerCAmelCase: Union[str, Any] = { 'gpt-neox-20b': 2_0_4_8, } class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["""input_ids""", """attention_mask"""] def __init__( self : List[str] , __snake_case : Optional[int]=None , __snake_case : Optional[int]=None , __snake_case : Any=None , __snake_case : int="<|endoftext|>" , __snake_case : List[Any]="<|endoftext|>" , __snake_case : Tuple="<|endoftext|>" , __snake_case : str=False , **__snake_case : List[str] , ): super().__init__( __snake_case , __snake_case , tokenizer_file=__snake_case , unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , ) a : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : str = getattr(__snake_case , pre_tok_state.pop('type' ) ) a : Optional[int] = add_prefix_space a : Any = pre_tok_class(**__snake_case ) a : Optional[int] = add_prefix_space def lowercase_ ( self : Optional[int] , __snake_case : str , __snake_case : Optional[str] = None ): a : Optional[int] = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case ) def lowercase_ ( self : Tuple , __snake_case : "Conversation" ): a : str = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__snake_case , add_special_tokens=__snake_case ) + [self.eos_token_id] ) if len(__snake_case ) > self.model_max_length: a : Tuple = input_ids[-self.model_max_length :] return input_ids
297
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class a__: def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ): a : Tuple = parent a : List[str] = batch_size a : Optional[Any] = seq_length a : Tuple = is_training a : Optional[Any] = use_input_mask a : List[Any] = use_token_type_ids a : List[Any] = use_labels a : int = vocab_size a : Union[str, Any] = hidden_size a : Any = num_hidden_layers a : List[str] = num_attention_heads a : int = intermediate_size a : str = hidden_act a : Tuple = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : List[str] = max_position_embeddings a : Any = type_vocab_size a : List[str] = type_sequence_label_size a : Union[str, Any] = initializer_range a : Optional[int] = num_labels a : Optional[Any] = num_choices a : Optional[int] = scope def lowercase_ ( self : List[Any] ): a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a : Dict = None if self.use_input_mask: a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a : Optional[Any] = None a : Optional[int] = None a : Dict = None if self.use_labels: a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) a : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : List[Any] ): a : Any = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ): a : Tuple = EsmForProteinFolding(config=__snake_case ).float() model.to(__snake_case ) model.eval() a : Dict = model(__snake_case , attention_mask=__snake_case ) a : Union[str, Any] = 
model(__snake_case ) a : List[Any] = model(__snake_case ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def lowercase_ ( self : Optional[Any] ): a : Tuple = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) : Optional[Any] = config_and_inputs a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = False lowercase__ = (EsmForProteinFolding,) if is_torch_available() else () lowercase__ = () lowercase__ = {} if is_torch_available() else {} lowercase__ = False def lowercase_ ( self : int ): a : Tuple = EsmFoldModelTester(self ) a : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def lowercase_ ( self : List[str] ): self.config_tester.run_common_tests() def lowercase_ ( self : Union[str, Any] ): a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) @unittest.skip('Does not support attention outputs' ) def lowercase_ ( self : str ): pass @unittest.skip def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support passing input embeds!' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold only has one output format.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def lowercase_ ( self : Tuple ): pass @unittest.skip('ESMFold does not support input chunking.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase_ ( self : Union[str, Any] ): pass @require_torch class a__( lowerCamelCase__ ): @slow def lowercase_ ( self : Optional[int] ): a : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() a : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) a : Any = model(__snake_case )['positions'] a : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) )
297
1
'''simple docstring'''

import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
297
'''simple docstring''' from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class a__( nn.Module ): def __init__( self : Any , __snake_case : int = 16 , __snake_case : int = 88 , __snake_case : Optional[int] = None , __snake_case : int = 1 , __snake_case : float = 0.0 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : Optional[int] = None , ): super().__init__() a : Optional[int] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__snake_case , attention_head_dim=__snake_case , in_channels=__snake_case , num_layers=__snake_case , dropout=__snake_case , norm_num_groups=__snake_case , cross_attention_dim=__snake_case , attention_bias=__snake_case , sample_size=__snake_case , num_vector_embeds=__snake_case , activation_fn=__snake_case , num_embeds_ada_norm=__snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference a : Union[str, Any] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` a : Tuple = [77, 2_57] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` a : Any = [1, 0] def lowercase_ ( self : str , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Dict=None , __snake_case : bool = True , ): a : Dict = hidden_states a : Tuple = [] a : Optional[int] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens a : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] a : Tuple = self.transformer_index_for_condition[i] a : Union[str, Any] = self.transformers[transformer_index]( __snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case , cross_attention_kwargs=__snake_case , return_dict=__snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] a : Optional[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) a : int = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__snake_case )
297
1
'''simple docstring'''

from __future__ import annotations

seive = [True] * 1_0_0_0_0_0_1
i = 2
while i * i <= 1_0_0_0_0_0_0:
    if seive[i]:
        for j in range(i * i, 1_0_0_0_0_0_1, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in '02468' for digit in str(n))


def find_circular_primes(limit: int = 100_0000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def count_circular_primes() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(F"{len(find_circular_primes()) = }")
297
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase: Union[str, Any] = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: List[Any] = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Any = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase: Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available lowerCAmelCase: List[str] = { 'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: List[Any] = [ 'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys lowerCAmelCase: int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
'''simple docstring'''

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mgp_str'] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
1
'''simple docstring'''

import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
297
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase: Dict = logging.get_logger(__name__) lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase: List[Any] = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } lowerCAmelCase: str = { 'allenai/led-base-16384': 1_6_3_8_4, } class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Dict="replace" , __snake_case : int="<s>" , __snake_case : Any="</s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : int="<mask>" , __snake_case : int=False , __snake_case : str=True , **__snake_case : Tuple , ): super().__init__( __snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , ) a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : List[Any] = getattr(__snake_case , pre_tok_state.pop('type' ) ) a : Optional[Any] = add_prefix_space a : Optional[Any] = pre_tok_class(**__snake_case ) a : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a : Dict = 'post_processor' a : int = getattr(self.backend_tokenizer , __snake_case , __snake_case ) if tokenizer_component_instance: a : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a : Any = tuple(state['sep'] ) if "cls" in state: a : Any = tuple(state['cls'] ) a : Optional[Any] = False if state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : Any = add_prefix_space a : Optional[Any] = True if state.get('trim_offsets' , __snake_case ) != trim_offsets: a : List[Any] = trim_offsets a : Union[str, Any] = True if changes_to_apply: a : int = getattr(__snake_case , state.pop('type' ) ) a : List[Any] = component_class(**__snake_case ) setattr(self.backend_tokenizer , __snake_case , __snake_case ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def lowercase_ ( self : Dict ): if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self : Dict , __snake_case : List[str] ): a : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value a : Optional[int] = value def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ): a : Dict = kwargs.get('is_split_into_words' , __snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*__snake_case , **__snake_case ) def lowercase_ ( self : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : List[str] ): a : Optional[int] = kwargs.get('is_split_into_words' , __snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._encode_plus(*__snake_case , **__snake_case ) def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ): a : Union[str, Any] = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : int=None ): a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ): a : int = [self.sep_token_id] a : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ): a : Optional[Any] = super()._pad( encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , ) # Load from model defaults if return_attention_mask is None: a : str = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a : Any = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. a : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case ) if needs_to_be_padded: a : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a : Dict = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": a : Union[str, Any] = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
297
1
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor

logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
297
'''simple docstring'''

import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
297
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowerCAmelCase: Optional[Any] = logging.get_logger(__name__) lowerCAmelCase: List[str] = { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class a__( lowerCamelCase__ ): lowercase__ = """blenderbot-small""" lowercase__ = ["""past_key_values"""] lowercase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Any , __snake_case : List[Any]=5_02_65 , __snake_case : Dict=5_12 , __snake_case : Optional[int]=8 , __snake_case : Optional[Any]=20_48 , __snake_case : Union[str, Any]=16 , __snake_case : Optional[int]=8 , __snake_case : Union[str, Any]=20_48 , __snake_case : Union[str, Any]=16 , __snake_case : Union[str, Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=True , __snake_case : List[str]="gelu" , __snake_case : Dict=5_12 , __snake_case : Optional[int]=0.1 , __snake_case : List[Any]=0.0 , __snake_case : int=0.0 , __snake_case : Dict=0.02 , __snake_case : int=1 , __snake_case : int=False , __snake_case : int=0 , __snake_case : List[Any]=1 , __snake_case : Dict=2 , __snake_case : List[Any]=2 , **__snake_case : int , ): a : Optional[Any] = vocab_size a : List[str] = max_position_embeddings a : List[str] = d_model a : int = encoder_ffn_dim a : Optional[int] = encoder_layers a : Any = encoder_attention_heads a : Optional[Any] = decoder_ffn_dim a : Any = decoder_layers a : Any = decoder_attention_heads a : Union[str, Any] = dropout a : Dict = attention_dropout a : Union[str, Any] = activation_dropout a : Dict = activation_function a : Any = init_std a : int = encoder_layerdrop a : Tuple = decoder_layerdrop a : int = use_cache a : str = encoder_layers a : int = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , ) class a__( lowerCamelCase__ ): @property def lowercase_ ( self : List[str] ): if self.task in ["default", "seq2seq-lm"]: a : Any = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: a : Tuple = {0: 'batch'} a : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: a : Tuple = {0: 'batch', 1: 'decoder_sequence'} a : List[str] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
a : Any = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: a , a : int = self.num_layers for i in range(__snake_case ): a : List[str] = {0: 'batch', 2: 'past_sequence + sequence'} a : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'} else: a : Tuple = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def lowercase_ ( self : Optional[int] ): if self.task in ["default", "seq2seq-lm"]: a : List[str] = super().outputs else: a : Optional[int] = super(__snake_case , self ).outputs if self.use_past: a , a : Optional[Any] = self.num_layers for i in range(__snake_case ): a : List[str] = {0: 'batch', 2: 'past_sequence + sequence'} a : Dict = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def lowercase_ ( self : Union[str, Any] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ): a : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Generate decoder inputs a : List[str] = seq_length if not self.use_past else 1 a : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) a : Dict = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} a : Tuple = dict(**__snake_case , **__snake_case ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch a , a : Union[str, Any] = common_inputs['input_ids'].shape a : List[str] = common_inputs['decoder_input_ids'].shape[1] a , a : List[str] = self.num_attention_heads a : List[str] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) a : Optional[int] = decoder_seq_length + 3 a : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) a : Dict = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(__snake_case , __snake_case )] , dim=1 ) a : Optional[int] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered a , a : List[Any] = self.num_layers a : List[Any] = min(__snake_case , __snake_case ) a : Any = max(__snake_case , __snake_case ) - min_num_layers a : int = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(__snake_case ): common_inputs["past_key_values"].append( ( torch.zeros(__snake_case ), torch.zeros(__snake_case ), torch.zeros(__snake_case ), torch.zeros(__snake_case ), ) ) # TODO: test this. 
a : List[Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(__snake_case , __snake_case ): common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) ) return common_inputs def lowercase_ ( self : List[str] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ): a : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch a , a : int = common_inputs['input_ids'].shape # Not using the same length for past_key_values a : List[Any] = seqlen + 2 a , a : List[str] = self.num_layers a , a : Union[str, Any] = self.num_attention_heads a : Any = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) a : str = common_inputs['attention_mask'].dtype a : List[str] = torch.cat( [common_inputs['attention_mask'], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 ) a : str = [ (torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case ) ] return common_inputs def lowercase_ ( self : Optional[int] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX a : int = compute_effective_axis_dimension( __snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX a : Any = tokenizer.num_special_tokens_to_add(__snake_case ) a : Any = compute_effective_axis_dimension( __snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case ) # Generate dummy inputs according to compute batch and sequence a : int = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size a : int = dict(tokenizer(__snake_case , return_tensors=__snake_case ) ) return common_inputs def lowercase_ ( self : Any , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: a : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) elif self.task == "causal-lm": a : int = self._generate_dummy_inputs_for_causal_lm( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) else: a : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case ) return common_inputs def lowercase_ ( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Optional[int] ): if 
self.task in ["default", "seq2seq-lm"]: a : int = super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case ) else: a : Optional[Any] = super(__snake_case , self )._flatten_past_key_values_( __snake_case , __snake_case , __snake_case , __snake_case )
297
'''simple docstring''' import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class a__: def __init__( self : List[Any] , __snake_case : Union[str, Any] ): if isinstance(__snake_case , __snake_case ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden a : str = deepcopy(__snake_case ) elif os.path.exists(__snake_case ): with io.open(__snake_case , 'r' , encoding='utf-8' ) as f: a : Optional[Any] = json.load(__snake_case ) else: try: a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' ) a : Union[str, Any] = json.loads(__snake_case ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) a : List[str] = config self.set_stage_and_offload() def lowercase_ ( self : List[str] ): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. a : Dict = self.get_value('zero_optimization.stage' , -1 ) # offload a : str = False if self.is_zeroa() or self.is_zeroa(): a : Union[str, Any] = set(['cpu', 'nvme'] ) a : Optional[Any] = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: a : List[str] = True def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ): a : str = self.config # find the config node of interest if it exists a : List[str] = ds_key_long.split('.' ) a : Dict = nodes.pop() for node in nodes: a : List[Any] = config.get(__snake_case ) if config is None: return None, ds_key return config, ds_key def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ): a , a : List[Any] = self.find_config_node(__snake_case ) if config is None: return default return config.get(__snake_case , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ): a : Optional[Any] = self.config # find the config node of interest if it exists a : List[str] = ds_key_long.split('.' 
) for node in nodes: a : str = config a : Dict = config.get(__snake_case ) if config is None: if must_exist: raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ): a : Union[str, Any] = self.get_value(__snake_case ) return False if value is None else bool(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : str ): a : Optional[Any] = self.get_value(__snake_case ) return False if value is None else not bool(__snake_case ) def lowercase_ ( self : Optional[Any] ): return self._stage == 2 def lowercase_ ( self : Union[str, Any] ): return self._stage == 3 def lowercase_ ( self : str ): return self._offload class a__: def __init__( self : Tuple , __snake_case : str ): a : Optional[Any] = engine def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ): # runs backpropagation and handles mixed precision self.engine.backward(__snake_case , **__snake_case ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : List[str] ): super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case ) a : Optional[Any] = hasattr(self.optimizer , 'overflow' ) def lowercase_ ( self : Dict , __snake_case : Dict=None ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def lowercase_ ( self : Optional[Any] ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def lowercase_ ( self : Tuple ): if self.__has_overflow__: return self.optimizer.overflow return False class a__( lowerCamelCase__ ): def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ): super().__init__(__snake_case , __snake_case ) def lowercase_ ( self : Any ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class a__: def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ): a : Optional[Any] = params a : str = lr a : List[str] = weight_decay a : str = kwargs class a__: def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ): a : Union[str, Any] = optimizer a : Any = total_num_steps a : List[str] = warmup_num_steps a : int = kwargs
297
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase: int = logging.get_logger(__name__) lowerCAmelCase: str = torch.device('cpu') def lowerCamelCase__ ( ): a : str = 'http://images.cocodataset.org/val2017/000000039769.jpg' a : List[Any] = Image.open(requests.get(_A , stream=_A ).raw ) return im def lowerCamelCase__ ( _A ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] ) def lowerCamelCase__ ( _A , _A , _A ): a : Any = dct.pop(_A ) a : List[Any] = val def lowerCamelCase__ ( _A ): a : List[str] = [] for k in state_dict.keys(): a : List[str] = k if ".pwconv" in k: a : Tuple = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: a : str = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: a : Optional[Any] = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: a : int = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: a : Optional[Any] = k_new.split('.' ) if ls[2].isdigit(): a : Any = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: a : Optional[int] = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCamelCase__ ( _A , _A , _A ): a : Any = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size a : Optional[Any] = 1000 a : Optional[Any] = 'huggingface/label-files' a : List[Any] = 'imagenet-1k-id2label.json' a : int = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) ) a : Dict = {int(_A ): v for k, v in idalabel.items()} a : Union[str, Any] = idalabel a : str = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": a : List[str] = [3, 3, 6, 4] a : Optional[int] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": a : Dict = [3, 3, 9, 6] a : List[Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": a : Any = [4, 3, 10, 5] a : Tuple = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": a : Optional[int] = [4, 4, 12, 6] a : Tuple = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): a : Tuple = torch.hub.load_state_dict_from_url(_A , map_location='cpu' , check_hash=_A ) else: a : Any = torch.load(_A , map_location='cpu' ) a : Union[str, Any] = checkpoint a : Optional[Any] = create_rename_keys(_A ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_A , _A , _A ) # load HuggingFace model a : Optional[int] = SwiftFormerForImageClassification(_A ).eval() hf_model.load_state_dict(_A ) # prepare test inputs a : Dict = prepare_img() a : int = ViTImageProcessor.from_pretrained('preprocessor_config' ) a : List[str] = processor(images=_A , return_tensors='pt' ) # compare outputs from both models a : str = get_expected_output(_A ) a : Optional[Any] = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , _A , atol=1E-3 ) Path(_A ).mkdir(exist_ok=_A ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_A ) if __name__ == "__main__": lowerCAmelCase: Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') lowerCAmelCase: Tuple = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowerCAmelCase: int = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class a__( unittest.TestCase ): def lowercase_ ( self : int , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): a : Optional[int] = None a : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) a : List[str] = os.path.abspath('examples' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: a : int = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='main()' if parser_only else 'training_function()' , ): a : List[Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) a : Union[str, Any] = '\n'.join(__snake_case ) if special_strings is not None: for string in special_strings: a : Union[str, Any] = diff.replace(__snake_case , '' ) self.assertEqual(__snake_case , '' ) def lowercase_ ( self : Optional[Any] ): self.one_complete_example('complete_nlp_example.py' , __snake_case ) self.one_complete_example('complete_nlp_example.py' , __snake_case ) def lowercase_ ( self : Any ): a : Dict = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) a : int = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class a__( lowerCamelCase__ ): lowercase__ = False @classmethod def lowercase_ ( cls : Optional[int] ): super().setUpClass() a : List[str] = tempfile.mkdtemp() a : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) a : Optional[int] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def lowercase_ ( cls : Optional[int] ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def lowercase_ ( self : Tuple ): a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def lowercase_ ( self : Dict ): a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 
--output_dir {self.tmpdir} """.split() a : int = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def lowercase_ ( self : Any ): a : Tuple = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} """.split() a : int = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) def lowercase_ ( self : int ): a : Optional[int] = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} """.split() a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): a : Any = torch.cuda.device_count() else: a : str = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) else: self.assertIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) @slow def lowercase_ ( self : Tuple ): a : Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): a : Any = run_command(self._launch_args + testargs , return_stdout=__snake_case ) a : Optional[Any] = re.findall('({.+})' , __snake_case ) a : str = [r for r in results if 'accuracy' in r][-1] a : str = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def lowercase_ ( self : Optional[int] ): a : int = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def lowercase_ ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: a : Optional[Any] = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , 'tracking' ) ) ) def lowercase_ ( self : List[str] ): a : Optional[Any] = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def lowercase_ ( self : int ): a : Optional[Any] = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
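# Hedged note (paths shown are illustrative): each test above ultimately shells
# out to the `accelerate launch` CLI using the temporary config written in
# setUpClass, roughly:
#
#     accelerate launch --config_file <tmpdir>/default_config.yml \
#         examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir <tmpdir>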
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class a__( lowerCamelCase__ ): def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ): a : Union[str, Any] = tokenizer a : Union[str, Any] = dataset a : Any = len(__snake_case ) if n_tasks is None else n_tasks a : List[str] = n_copies def __iter__( self : str ): a : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() ) a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class a__( lowerCamelCase__ ): def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ): a : Dict = start_length a : Dict = eof_strings a : str = tokenizer def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ): a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__snake_case ) def lowerCamelCase__ ( _A ): a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ): a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_A ) ): with torch.no_grad(): a : Optional[Any] = batch['ids'].shape[-1] a : Optional[Any] = accelerator.unwrap_model(_A ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A ) # each task is generated batch_size times a : Tuple = batch['task_id'].repeat(_A ) a : List[Any] = accelerator.pad_across_processes( _A , dim=1 , pad_index=tokenizer.pad_token_id ) a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) a : List[str] = generated_tokens.cpu().numpy() a : int = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_A , _A ): gen_token_dict[task].append(_A ) a : Any = [[] for _ in range(_A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) code_gens[task].append(remove_last_block(_A ) ) return code_gens def lowerCamelCase__ ( ): # Setup configuration a : Dict = HfArgumentParser(_A ) a : Any = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a : List[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice 
with multiprocessing a : int = 'false' if args.num_workers is None: a : Dict = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a : List[Any] = Accelerator() set_seed(args.seed , device_specific=_A ) # Load model and tokenizer a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) a : str = tokenizer.eos_token a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a : Optional[Any] = { 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ), } # Load evaluation dataset and metric a : Optional[int] = load_dataset('openai_humaneval' ) a : Optional[Any] = load_metric('code_eval' ) a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) a : Optional[Any] = args.n_samples // args.batch_size a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A ) # do not confuse args.batch_size, which is actually the num_return_sequences a : int = DataLoader(_A , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a : int = code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`' ' flag to enable code evaluation.' ) raise exception a , a : int = accelerator.prepare(_A , _A ) a : int = complete_code( _A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , ) if accelerator.is_main_process: a : List[str] = [] for task in tqdm(range(_A ) ): a : int = human_eval['test'][task]['test'] a : int = f"""check({human_eval["test"][task]["entry_point"]})""" references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric a , a : Tuple = code_eval_metric.compute( references=_A , predictions=_A , num_workers=args.num_workers ) print(f"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(_A , _A ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
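# Hedged worked example for remove_last_block above; the generation text and the
# stop-string list (inlined from the constant at the top of this file) are for
# illustration only, and `re` is the module already imported above.
generation = 'def add(a, b):\n    return a + b\nclass Foo:'
stop_strings = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
pieces = re.split('(%s)' % '|'.join(stop_strings), generation)
assert ''.join(pieces[:-2]) == 'def add(a, b):\n    return a + b'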
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def lowerCamelCase__ ( _A , _A , _A , _A , _A ): a : List[str] = cva.getAffineTransform(_A , _A ) return cva.warpAffine(_A , _A , (rows, cols) ) if __name__ == "__main__": # read original image lowerCAmelCase: Union[str, Any] = cva.imread( str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg') ) # turn image in gray scale value lowerCAmelCase: int = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape lowerCAmelCase , lowerCAmelCase: str = gray_img.shape # set different points to rotate image lowerCAmelCase: List[str] = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa) lowerCAmelCase: List[str] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa) lowerCAmelCase: Optional[int] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa) lowerCAmelCase: Any = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa) # add all rotated images in a list lowerCAmelCase: Tuple = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations lowerCAmelCase: List[Any] = plt.figure(1) lowerCAmelCase: int = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3'] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray') plt.title(titles[i]) plt.axis('off') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
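# Hedged math note: getAffineTransform solves for the unique 2x3 matrix M that
# maps the three source points onto the three destination points, i.e.
# [x', y'] = M @ [x, y, 1]; warpAffine then applies that matrix to every pixel.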
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def lowerCamelCase__ ( _A , _A , _A ): if isinstance(_A , torch.Tensor ): return image elif isinstance(_A , PIL.Image.Image ): a : Any = [image] if isinstance(image[0] , PIL.Image.Image ): a : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] a : int = np.concatenate(_A , axis=0 ) a : int = np.array(_A ).astype(np.floataa ) / 255.0 a : str = image.transpose(0 , 3 , 1 , 2 ) a : str = 2.0 * image - 1.0 a : Optional[int] = torch.from_numpy(_A ) elif isinstance(image[0] , torch.Tensor ): a : Optional[Any] = torch.cat(_A , dim=0 ) return image def lowerCamelCase__ ( _A , _A , _A , _A=0.9995 ): if not isinstance(_A , np.ndarray ): a : Dict = True a : Optional[Any] = va.device a : Optional[int] = va.cpu().numpy() a : Union[str, Any] = va.cpu().numpy() a : Any = np.sum(va * va / (np.linalg.norm(_A ) * np.linalg.norm(_A )) ) if np.abs(_A ) > DOT_THRESHOLD: a : Any = (1 - t) * va + t * va else: a : Any = np.arccos(_A ) a : Tuple = np.sin(_A ) a : Optional[Any] = theta_a * t a : List[Any] = np.sin(_A ) a : Dict = np.sin(theta_a - theta_t ) / sin_theta_a a : int = sin_theta_t / sin_theta_a a : Any = sa * va + sa * va if inputs_are_torch: a : Dict = torch.from_numpy(_A ).to(_A ) return va def lowerCamelCase__ ( _A , _A ): a : Optional[int] = F.normalize(_A , dim=-1 ) a : str = F.normalize(_A , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def lowerCamelCase__ ( _A , _A ): for param in model.parameters(): a : int = value class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ): super().__init__() self.register_modules( vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , ) a : Optional[Any] = ( feature_extractor.size if isinstance(feature_extractor.size , __snake_case ) else feature_extractor.size['shortest_edge'] ) a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , __snake_case ) set_requires_grad(self.clip_model , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__snake_case ) def 
lowercase_ ( self : Union[str, Any] ): self.enable_attention_slicing(__snake_case ) def lowercase_ ( self : Optional[Any] ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : Tuple ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : int ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : Union[str, Any] ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ): # get the original timestep using init_timestep a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case ) a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 ) a : List[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ): if not isinstance(__snake_case , torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" ) a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case ) if isinstance(__snake_case , __snake_case ): a : Optional[int] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case ) ] a : Optional[Any] = torch.cat(__snake_case , dim=0 ) else: a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : List[str] = 0.18215 * init_latents a : str = init_latents.repeat_interleave(__snake_case , dim=0 ) a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case ) # get latents a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case ) a : int = init_latents return latents def lowercase_ ( self : List[str] , __snake_case : Dict ): a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ): a : List[Any] = self.feature_extractor.preprocess(__snake_case ) a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() a : int = self.clip_model.get_image_features(__snake_case ) a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 ) return image_embeddings_clip @torch.enable_grad() def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ): a : Optional[Any] = latents.detach().requires_grad_() a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, 
DPMSolverMultistepScheduler) ): a : int = self.scheduler.alphas_cumprod[timestep] a : Any = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 a : Tuple = torch.sqrt(__snake_case ) a : str = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , __snake_case ): a : List[Any] = self.scheduler.sigmas[index] a : Optional[int] = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Union[str, Any] = 1 / 0.18215 * sample a : str = self.vae.decode(__snake_case ).sample a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case ) a : List[str] = self.normalize(__snake_case ).to(latents.dtype ) a : List[str] = self.clip_model.get_image_features(__snake_case ) a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0] if isinstance(self.scheduler , __snake_case ): a : List[Any] = latents.detach() + grads * (sigma**2) a : Optional[int] = noise_pred_original else: a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ): if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(__snake_case , torch.Generator ) and batch_size > 1: a : Dict = [generator] + [None] * (batch_size - 1) a : Any = [ ('model', self.coca_model is None), ('tokenizer', self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] a : List[str] = [x[0] for x in coca_is_none if x[1]] a : List[str] = ', '.join(__snake_case ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(__snake_case ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) a : int = self.get_image_description(__snake_case ) if style_prompt is None: if len(__snake_case ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) 
a : Union[str, Any] = self.get_image_description(__snake_case ) # get prompt text embeddings for content and style a : Optional[Any] = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] a : Dict = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] a : Any = slerp(__snake_case , __snake_case , __snake_case ) # duplicate text embeddings for each generation per prompt a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 ) # set timesteps a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) a : Any = {} if accepts_offset: a : Optional[Any] = 1 self.scheduler.set_timesteps(__snake_case , **__snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device ) a : Optional[int] = timesteps[:1].repeat(__snake_case ) # Preprocess image a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case ) a : List[Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : str = preprocess(__snake_case , __snake_case , __snake_case ) a : Union[str, Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case ) if clip_guidance_scale > 0: a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : int = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : List[str] = slerp( __snake_case , __snake_case , __snake_case ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. a : int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: a : Any = content_text_input.input_ids.shape[-1] a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' ) a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a : Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) a : List[str] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to( self.device ) else: a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) a : List[str] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a : Union[str, Any] = {} if accepts_eta: a : List[str] = eta # check if the scheduler accepts generator a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: a : Any = generator with self.progress_bar(total=__snake_case ): for i, t in enumerate(__snake_case ): # expand the latents if we are doing classifier free guidance a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample # perform classifier free guidance if do_classifier_free_guidance: a , a : List[str] = noise_pred.chunk(2 ) a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: a : Optional[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) a , a : Union[str, Any] = self.cond_fn( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # compute the previous noisy sample x_t -> x_t-1 a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Tuple = 1 / 0.18215 * latents a : Optional[int] = self.vae.decode(__snake_case ).sample a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a : str = self.numpy_to_pil(__snake_case ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
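# Hedged numeric check of the slerp helper defined above (pure tensor math, no
# model weights needed). For orthogonal unit vectors and t = 0.5, spherical
# interpolation lands on the 45-degree direction:
#
#     import torch
#     v = slerp(0.5, torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0]))
#     # v is approximately tensor([0.7071, 0.7071])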
'''simple docstring'''


def lowerCamelCase__ ( _A , _A ):
    a : int = ''
    for i in table:
        res += inp[i - 1]
    return res


def lowerCamelCase__ ( _A ):
    return data[1:] + data[0]


def lowerCamelCase__ ( _A , _A ):
    a : int = ''
    for i in range(len(_A ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def lowerCamelCase__ ( _A , _A ):
    a : Union[str, Any] = int('0b' + data[0] + data[-1] , 2 )
    a : int = int('0b' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]


def lowerCamelCase__ ( _A , _A , _A , _A , _A ):
    a : List[str] = message[:4]
    a : int = message[4:]
    a : Any = apply_table(_A , _A )
    a : str = xor(_A , _A )
    a : Union[str, Any] = apply_sbox(_A , temp[:4] )  # noqa: E741
    a : Optional[int] = apply_sbox(_A , temp[4:] )
    a : Union[str, Any] = '0' * (2 - len(_A )) + l  # noqa: E741
    a : str = '0' * (2 - len(_A )) + r
    a : List[str] = apply_table(l + r , _A )
    a : int = xor(_A , _A )
    return temp + right


if __name__ == "__main__":
    lowerCAmelCase: int = input('Enter 10 bit key: ')
    lowerCAmelCase: int = input('Enter 8 bit message: ')

    lowerCAmelCase: Dict = [6, 3, 7, 4, 8, 5, 1_0, 9]
    lowerCAmelCase: Optional[Any] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
    lowerCAmelCase: Any = [2, 4, 3, 1]
    lowerCAmelCase: str = [2, 6, 3, 1, 4, 8, 5, 7]
    lowerCAmelCase: str = [4, 1, 3, 5, 7, 2, 8, 6]
    lowerCAmelCase: str = [4, 1, 2, 3, 2, 3, 4, 1]
    lowerCAmelCase: Any = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    lowerCAmelCase: Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    lowerCAmelCase: List[str] = apply_table(key, paa_table)
    lowerCAmelCase: str = temp[:5]
    lowerCAmelCase: List[Any] = temp[5:]
    lowerCAmelCase: int = left_shift(left)
    lowerCAmelCase: int = left_shift(right)
    lowerCAmelCase: Union[str, Any] = apply_table(left + right, pa_table)
    lowerCAmelCase: Any = left_shift(left)
    lowerCAmelCase: List[Any] = left_shift(right)
    lowerCAmelCase: Optional[int] = left_shift(left)
    lowerCAmelCase: Union[str, Any] = left_shift(right)
    lowerCAmelCase: Tuple = apply_table(left + right, pa_table)

    # encryption
    lowerCAmelCase: Any = apply_table(message, IP)
    lowerCAmelCase: Any = function(expansion, sa, sa, keya, temp)
    lowerCAmelCase: Optional[int] = temp[4:] + temp[:4]
    lowerCAmelCase: Any = function(expansion, sa, sa, keya, temp)
    lowerCAmelCase: Tuple = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption
    lowerCAmelCase: List[Any] = apply_table(CT, IP)
    lowerCAmelCase: Optional[int] = function(expansion, sa, sa, keya, temp)
    lowerCAmelCase: Optional[int] = temp[4:] + temp[:4]
    lowerCAmelCase: Optional[Any] = function(expansion, sa, sa, keya, temp)
    lowerCAmelCase: Tuple = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
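# Quick hedged sanity checks for the bit-string helpers above (assuming they
# keep their original names, left_shift and xor, as the call sites do):
assert left_shift('10100') == '01001'  # circular left shift by one bit
assert xor('1010', '0110') == '1100'   # per-character exclusive or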
'''simple docstring'''


def lowerCamelCase__ ( _A , _A , _A , _A , _A , ):
    a : Dict = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )

    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        a : Union[str, Any] = 1 - (matter_density + radiation_density + dark_energy)
        a : Union[str, Any] = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        a : int = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    lowerCAmelCase: Optional[Any] = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
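# Hedged worked example for the demo above: with omega_r = 1e-4, omega_m = 0.3
# and omega_lambda = 0.7 the curvature term is 1 - 1.0001 = -1e-4, so at
# redshift 0 we get E(0)**2 = 1e-4 + 0.3 - 1e-4 + 0.7 = 1 and the call returns
# the Hubble constant itself, 68.3.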
'''simple docstring''' from collections import defaultdict class a__: def __init__( self : List[Any] , __snake_case : Tuple , __snake_case : Optional[Any] ): a : Dict = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 a : Any = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(__snake_case ) ) ] a : List[Any] = defaultdict(__snake_case ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 a : str = (1 << len(__snake_case )) - 1 def lowercase_ ( self : str , __snake_case : List[Any] , __snake_case : Dict ): # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement a : int = self.count_ways_until(__snake_case , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. a : List[str] = total_ways_util return self.dp[mask][task_no] def lowercase_ ( self : str , __snake_case : Tuple ): # Store the list of persons for each task for i in range(len(__snake_case ) ): for j in task_performed[i]: self.task[j].append(__snake_case ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": lowerCAmelCase: List[Any] = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. lowerCAmelCase: Union[str, Any] = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
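# Hedged sizing note for the demo above: with M = 3 persons and N = 5 tasks the
# memo table has 2**3 = 8 mask rows and N + 1 = 6 task columns, so at most 48
# (mask, task) states are ever stored.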
'''simple docstring''' from __future__ import annotations import math class a__: def __init__( self : List[str] , __snake_case : int ): a : str = size # approximate the overall size of segment tree with given value a : Optional[int] = [0 for i in range(0 , 4 * size )] # create array to store lazy update a : Any = [0 for i in range(0 , 4 * size )] a : Dict = [0 for i in range(0 , 4 * size )] # flag for lazy update def lowercase_ ( self : int , __snake_case : int ): return idx * 2 def lowercase_ ( self : Dict , __snake_case : int ): return idx * 2 + 1 def lowercase_ ( self : Dict , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] ): if left_element == right_element: a : Tuple = a[left_element - 1] else: a : Tuple = (left_element + right_element) // 2 self.build(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case ) self.build(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case ) a : Union[str, Any] = max( self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] ) def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ): if self.flag[idx] is True: a : int = self.lazy[idx] a : Union[str, Any] = False if left_element != right_element: a : Dict = self.lazy[idx] a : int = self.lazy[idx] a : Tuple = True a : Optional[Any] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: a : int = val if left_element != right_element: a : int = val a : Dict = val a : List[str] = True a : List[str] = True return True a : Tuple = (left_element + right_element) // 2 self.update(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) self.update(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case , __snake_case ) a : Optional[int] = max( self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] ) return True def lowercase_ ( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ): if self.flag[idx] is True: a : str = self.lazy[idx] a : Optional[Any] = False if left_element != right_element: a : Dict = self.lazy[idx] a : Union[str, Any] = self.lazy[idx] a : Dict = True a : int = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] a : Dict = (left_element + right_element) // 2 a : Optional[int] = self.query(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case ) a : Union[str, Any] = self.query(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case ) return max(__snake_case , __snake_case ) def __str__( self : Any ): return str([self.query(1 , 1 , self.size , __snake_case , __snake_case ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": lowerCAmelCase: Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8] lowerCAmelCase: int = 1_5 lowerCAmelCase: Optional[int] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 1_1)) print(segt.query(1, 1, size, 7, 1_2)) segt.update(1, 1, size, 1, 3, 1_1_1) print(segt.query(1, 1, size, 1, 1_5)) segt.update(1, 1, size, 7, 8, 2_3_5) print(segt)
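# Hedged trace of the demo above, worked by hand from A: the four prints show
# 7 (max of 7, 3, -5), then 14, then 15, then 111 once positions 1-3 have been
# assigned 111; the final print of the tree yields the per-index values
# [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8].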
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class a__( lowerCamelCase__ ): lowercase__ = 42 class a__( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self : Any , __snake_case : int = 16 , __snake_case : int = 88 , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : int = 1 , __snake_case : float = 0.0 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : bool = True , __snake_case : bool = True , ): super().__init__() a : Dict = num_attention_heads a : Optional[int] = attention_head_dim a : List[Any] = num_attention_heads * attention_head_dim a : Any = in_channels a : Dict = torch.nn.GroupNorm(num_groups=__snake_case , num_channels=__snake_case , eps=1e-6 , affine=__snake_case ) a : Union[str, Any] = nn.Linear(__snake_case , __snake_case ) # 3. Define transformers blocks a : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock( __snake_case , __snake_case , __snake_case , dropout=__snake_case , cross_attention_dim=__snake_case , activation_fn=__snake_case , attention_bias=__snake_case , double_self_attention=__snake_case , norm_elementwise_affine=__snake_case , ) for d in range(__snake_case ) ] ) a : Dict = nn.Linear(__snake_case , __snake_case ) def lowercase_ ( self : Any , __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : Optional[Any]=None , __snake_case : str=None , __snake_case : Tuple=1 , __snake_case : Tuple=None , __snake_case : bool = True , ): a , a , a , a : int = hidden_states.shape a : Tuple = batch_frames // num_frames a : Optional[int] = hidden_states a : int = hidden_states[None, :].reshape(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) a : Dict = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) a : List[str] = self.norm(__snake_case ) a : Union[str, Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __snake_case , __snake_case ) a : Optional[Any] = self.proj_in(__snake_case ) # 2. Blocks for block in self.transformer_blocks: a : Union[str, Any] = block( __snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case , cross_attention_kwargs=__snake_case , class_labels=__snake_case , ) # 3. Output a : List[str] = self.proj_out(__snake_case ) a : Optional[int] = ( hidden_states[None, None, :] .reshape(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) a : Optional[int] = hidden_states.reshape(__snake_case , __snake_case , __snake_case , __snake_case ) a : Tuple = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=__snake_case )
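# Hedged shape walkthrough for the forward pass above: an input of shape
# (batch * frames, C, H, W) is reshaped to (batch, C, frames, H, W) for group
# norm, flattened to (batch * H * W, frames, C) and projected to the attention
# width so that self-attention mixes information along the temporal axis only,
# then projected back, reshaped, and added to the residual.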
'''simple docstring'''


def lowerCamelCase__ ( _A , _A ):
    # Iteratively add two integers with bitwise ops: AND extracts the carry
    # bits, XOR adds without carrying, and the shifted carry is fed back in
    # until no carry remains.
    while second != 0:
        a : Union[str, Any] = first & second
        first ^= second
        a : Tuple = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lowerCAmelCase: Optional[int] = int(input('Enter the first number: ').strip())
    lowerCAmelCase: Union[str, Any] = int(input('Enter the second number: ').strip())
    print(F"{add(first, second) = }")
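# Hedged trace of add(5, 3) through the loop above: carry = 5 & 3 = 1,
# sum = 5 ^ 3 = 6, shifted carry 2; then 6 & 2 = 2 gives sum 4, carry 4;
# 4 & 4 = 4 gives sum 0, carry 8; finally 0 & 8 = 0 and sum 0 ^ 8 = 8, so the
# loop stops and 8 is returned.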
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class a__( lowerCamelCase__ ): def __init__( self : Any , __snake_case : str = "▁" , __snake_case : bool = True , __snake_case : Union[str, AddedToken] = "<unk>" , __snake_case : Union[str, AddedToken] = "</s>" , __snake_case : Union[str, AddedToken] = "<pad>" , ): a : Any = { 'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}, } a : Optional[Any] = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): a : Tuple = token_dict['token'] a : Any = Tokenizer(Unigram() ) a : Tuple = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}' ) , ' ' ), normalizers.Lowercase(), ] ) a : Any = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__snake_case , add_prefix_space=__snake_case ), pre_tokenizers.Digits(individual_digits=__snake_case ), pre_tokenizers.Punctuation(), ] ) a : Tuple = decoders.Metaspace(replacement=__snake_case , add_prefix_space=__snake_case ) a : str = TemplateProcessing( single=F"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , ) a : Optional[Any] = { 'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space, } super().__init__(__snake_case , __snake_case ) def lowercase_ ( self : str , __snake_case : Union[str, List[str]] , __snake_case : int = 80_00 , __snake_case : bool = True , ): a : Optional[int] = trainers.UnigramTrainer( vocab_size=__snake_case , special_tokens=self.special_tokens_list , show_progress=__snake_case , ) if isinstance(__snake_case , __snake_case ): a : str = [files] self._tokenizer.train(__snake_case , trainer=__snake_case ) self.add_unk_id() def lowercase_ ( self : str , __snake_case : Union[Iterator[str], Iterator[Iterator[str]]] , __snake_case : int = 80_00 , __snake_case : bool = True , ): a : Tuple = trainers.UnigramTrainer( vocab_size=__snake_case , special_tokens=self.special_tokens_list , show_progress=__snake_case , ) self._tokenizer.train_from_iterator(__snake_case , trainer=__snake_case ) self.add_unk_id() def lowercase_ ( self : Tuple ): a : Union[str, Any] = json.loads(self._tokenizer.to_str() ) a : List[str] = self.special_tokens['unk']['id'] a : List[Any] = Tokenizer.from_str(json.dumps(__snake_case ) )
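# Hedged usage sketch; the file names are hypothetical and the class/method
# names assume the upstream originals (SentencePieceUnigramTokenizer.train):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train(['corpus.txt'], vocab_size=8000)
#     tokenizer.save('unigram-tokenizer.json')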
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( _A , _A ): assert isinstance(_A , _A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase__ ( _A , _A , _A ): a : str = tmp_path / 'cache' a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : str = tmp_path / 'cache' a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Dict = features.copy() if features else default_expected_features a : Union[str, Any] = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : Tuple = tmp_path / 'cache' a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} a : Optional[int] = features.copy() if features else default_expected_features a : Dict = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowerCamelCase__ ( _A , _A ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} a : int = features.copy() a : List[Any] = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Dict = tmp_path / 'cache' a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read() 
_check_json_dataset(_A , _A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowerCamelCase__ ( _A , _A , _A ): if issubclass(_A , _A ): a : Optional[int] = jsonl_path elif issubclass(_A , _A ): a : Optional[int] = [jsonl_path] a : List[str] = tmp_path / 'cache' a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) def lowerCamelCase__ ( _A , _A , _A=("train",) ): assert isinstance(_A , _A ) for split in splits: a : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : List[Any] = features.copy() if features else default_expected_features a : Any = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase__ ( _A , _A , _A ): if split: a : Any = {split: jsonl_path} else: a : List[Any] = 'train' a : List[str] = {'train': jsonl_path, 'test': jsonl_path} a : List[Any] = tmp_path / 'cache' a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( _A ): return json.load(_A ) def lowerCamelCase__ ( _A ): return [json.loads(_A ) for line in buffer] class a__: @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write() buffer.seek(0 ) a : List[str] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, 
{'schema', 'data'}, 'data'), ] , ) def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write() buffer.seek(0 ) a : int = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write() buffer.seek(0 ) a : List[Any] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write() buffer.seek(0 ) a : int = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 def lowercase_ ( self : List[str] , __snake_case : str ): with pytest.raises(__snake_case ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 ) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ): a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}""" a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" ) JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write() with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f: a : Union[str, Any] = f.read() with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f: a : Union[str, Any] = f.read() assert exported_content == original_content
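# Hedged illustration of the `orient` values exercised above: for a small
# dataset, orient='records' serialises each row as a dict in a list, while
# orient='split' produces {"columns": [...], "data": [[...], ...]}; these are
# exactly the container/keys pairs the parametrised tests assert on.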
'''simple docstring''' class a__: def __init__( self : Optional[int] ): a : int = '' a : List[Any] = '' a : Optional[Any] = [] def lowercase_ ( self : Optional[int] , __snake_case : int , __snake_case : int ): if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: a : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 ) else: a : Any = self.__min_dist_top_down_dp(__snake_case , n - 1 ) a : Dict = self.__min_dist_top_down_dp(m - 1 , __snake_case ) a : Any = self.__min_dist_top_down_dp(m - 1 , n - 1 ) a : str = 1 + min(__snake_case , __snake_case , __snake_case ) return self.dp[m][n] def lowercase_ ( self : Optional[int] , __snake_case : str , __snake_case : str ): a : List[str] = worda a : Any = worda a : Union[str, Any] = [[-1 for _ in range(len(__snake_case ) )] for _ in range(len(__snake_case ) )] return self.__min_dist_top_down_dp(len(__snake_case ) - 1 , len(__snake_case ) - 1 ) def lowercase_ ( self : List[Any] , __snake_case : str , __snake_case : str ): a : List[Any] = worda a : int = worda a : Union[str, Any] = len(__snake_case ) a : Tuple = len(__snake_case ) a : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty a : str = j elif j == 0: # second string is empty a : Union[str, Any] = i elif worda[i - 1] == worda[j - 1]: # last characters are equal a : int = self.dp[i - 1][j - 1] else: a : Optional[Any] = self.dp[i][j - 1] a : Union[str, Any] = self.dp[i - 1][j] a : Optional[int] = self.dp[i - 1][j - 1] a : str = 1 + min(__snake_case , __snake_case , __snake_case ) return self.dp[m][n] if __name__ == "__main__": lowerCAmelCase: Optional[Any] = EditDistance() print('****************** Testing Edit Distance DP Algorithm ******************') print() lowerCAmelCase: Dict = input('Enter the first string: ').strip() lowerCAmelCase: Union[str, Any] = input('Enter the second string: ').strip() print() print(F"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}") print(F"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}") print() print('*************** End of Testing Edit Distance DP Algorithm ***************')
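# Hedged worked example for the solver above: both methods agree, e.g. the
# classic pair below needs 5 edits.
#
#     solver.min_dist_bottom_up('intention', 'execution')  # -> 5
#     solver.min_dist_top_down('intention', 'execution')   # -> 5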
297
'''simple docstring'''

from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Scrape the Amazon search page for the given product name and return the
    product information (title, link, price, rating, MRP, discount) as a
    Pandas dataframe.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n"
        " (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        float(product_mrp.strip("₹").replace(",", ""))
                        - float(product_price.strip("₹").replace(",", ""))
                    )
                    / float(product_mrp.strip("₹").replace(",", ""))
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # Not a product entry; skip it instead of reusing stale values.
            continue
        data_frame.loc[len(data_frame)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out MRP and discount where the listed price compares greater
        # than the MRP (the computed discount would be meaningless there).
        overpriced = (
            data_frame["Current Price of the product"]
            > data_frame["MRP of the product"]
        )
        data_frame.loc[overpriced, "MRP of the product"] = " "
        data_frame.loc[overpriced, "Discount"] = " "
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
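# Hypothetical usage sketch for the scraper above (requires `requests`, `bs4`
# and `pandas`; Amazon may block or rate-limit automated requests, so treat
# the output as best-effort):
frame = get_amazon_product_data("mechanical keyboard")
print(frame.head())              # first few scraped products
print(len(frame), "rows parsed")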
297
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = StableUnCLIPImgaImgPipeline lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowercase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase__ = frozenset([] ) def lowercase_ ( self : int ): a : Dict = 32 a : str = embedder_hidden_size # image encoding components a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) a : Dict = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case ) a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' ) torch.manual_seed(0 ) a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) torch.manual_seed(0 ) a : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) a : Union[str, Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , ) torch.manual_seed(0 ) a : List[Any] = DDIMScheduler( beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , ) torch.manual_seed(0 ) a : List[str] = AutoencoderKL() a : str = { # image encoding components 'feature_extractor': feature_extractor, 'image_encoder': image_encoder.eval(), # image noising components 'image_normalizer': image_normalizer.eval(), 'image_noising_scheduler': image_noising_scheduler, # regular denoising components 'tokenizer': tokenizer, 'text_encoder': text_encoder.eval(), 'unet': unet.eval(), 'scheduler': 
scheduler, 'vae': vae.eval(), } return components def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ): if str(__snake_case ).startswith('mps' ): a : Tuple = torch.manual_seed(__snake_case ) else: a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case ) if pil_image: a : Optional[Any] = input_image * 0.5 + 0.5 a : Optional[Any] = input_image.clamp(0 , 1 ) a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def lowercase_ ( self : Optional[Any] ): a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Union[str, Any] = self.get_dummy_components() a : Any = StableUnCLIPImgaImgPipeline(**__snake_case ) a : Tuple = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) a : Union[str, Any] = self.get_dummy_inputs(__snake_case ) inputs.update({'image_embeds': None} ) a : str = sd_pipe(**__snake_case ).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self : List[str] ): a : int = torch_device in ['cpu', 'mps'] self._test_attention_slicing_forward_pass(test_max_difference=__snake_case ) def lowercase_ ( self : int ): a : Optional[int] = torch_device in ['cpu', 'mps'] self._test_inference_batch_single_identical(test_max_difference=__snake_case ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowercase_ ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case ) @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Optional[Any] ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' ) a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Optional[int] ): a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Any = 
load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' ) a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Any ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) a : Optional[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = pipe( __snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
297
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = StableUnCLIPImgaImgPipeline lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowercase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase__ = frozenset([] ) def lowercase_ ( self : int ): a : Dict = 32 a : str = embedder_hidden_size # image encoding components a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) a : Dict = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case ) a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' ) torch.manual_seed(0 ) a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) torch.manual_seed(0 ) a : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) a : Union[str, Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , ) torch.manual_seed(0 ) a : List[Any] = DDIMScheduler( beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , ) torch.manual_seed(0 ) a : List[str] = AutoencoderKL() a : str = { # image encoding components 'feature_extractor': feature_extractor, 'image_encoder': image_encoder.eval(), # image noising components 'image_normalizer': image_normalizer.eval(), 'image_noising_scheduler': image_noising_scheduler, # regular denoising components 'tokenizer': tokenizer, 'text_encoder': text_encoder.eval(), 'unet': unet.eval(), 'scheduler': 
scheduler, 'vae': vae.eval(), } return components def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ): if str(__snake_case ).startswith('mps' ): a : Tuple = torch.manual_seed(__snake_case ) else: a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case ) if pil_image: a : Optional[Any] = input_image * 0.5 + 0.5 a : Optional[Any] = input_image.clamp(0 , 1 ) a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def lowercase_ ( self : Optional[Any] ): a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Union[str, Any] = self.get_dummy_components() a : Any = StableUnCLIPImgaImgPipeline(**__snake_case ) a : Tuple = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) a : Union[str, Any] = self.get_dummy_inputs(__snake_case ) inputs.update({'image_embeds': None} ) a : str = sd_pipe(**__snake_case ).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self : List[str] ): a : int = torch_device in ['cpu', 'mps'] self._test_attention_slicing_forward_pass(test_max_difference=__snake_case ) def lowercase_ ( self : int ): a : Optional[int] = torch_device in ['cpu', 'mps'] self._test_inference_batch_single_identical(test_max_difference=__snake_case ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowercase_ ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case ) @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Optional[Any] ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' ) a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Optional[int] ): a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Any = 
load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' ) a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Any ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) a : Optional[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = pipe( __snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
297
1
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand lowerCAmelCase: str = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCamelCase__ ( _A ): if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(_A ): return ext raise Exception( f"""Unable to determine file format from file extension {path}. """ f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def lowerCamelCase__ ( _A ): a : Optional[Any] = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) a : Tuple = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format a : Tuple = PipelineDataFormat.from_str( format=_A , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(_A , _A ) class a__( lowerCamelCase__ ): def __init__( self : List[str] , __snake_case : Pipeline , __snake_case : PipelineDataFormat ): a : Dict = nlp a : Optional[int] = reader @staticmethod def lowercase_ ( __snake_case : ArgumentParser ): a : Any = parser.add_parser('run' , help='Run a pipeline through the CLI' ) run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' ) run_parser.add_argument('--input' , type=__snake_case , help='Path to the file to use for inference' ) run_parser.add_argument('--output' , type=__snake_case , help='Path to the file that will be used post to write results.' ) run_parser.add_argument('--model' , type=__snake_case , help='Name or path to the model to instantiate.' ) run_parser.add_argument('--config' , type=__snake_case , help='Name or path to the model\'s config to instantiate.' ) run_parser.add_argument( '--tokenizer' , type=__snake_case , help='Name of the tokenizer to use. (default: same as the model name)' ) run_parser.add_argument( '--column' , type=__snake_case , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , ) run_parser.add_argument( '--format' , type=__snake_case , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , ) run_parser.add_argument( '--device' , type=__snake_case , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , ) run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' ) run_parser.set_defaults(func=__snake_case ) def lowercase_ ( self : Union[str, Any] ): a , a : List[Any] = self._nlp, [] for entry in self._reader: a : List[str] = nlp(**__snake_case ) if self._reader.is_multi_columns else nlp(__snake_case ) if isinstance(__snake_case , __snake_case ): outputs.append(__snake_case ) else: outputs += output # Saving data if self._nlp.binary_output: a : Optional[int] = self._reader.save_binary(__snake_case ) logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(__snake_case )
297
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase: List[str] = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class a__( lowerCamelCase__ ): lowercase__ = """t5""" lowercase__ = ["""past_key_values"""] lowercase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Union[str, Any] , __snake_case : int=3_21_28 , __snake_case : str=5_12 , __snake_case : Dict=64 , __snake_case : Optional[int]=20_48 , __snake_case : Tuple=6 , __snake_case : Any=None , __snake_case : Optional[int]=8 , __snake_case : str=32 , __snake_case : Union[str, Any]=1_28 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=1e-6 , __snake_case : int=1.0 , __snake_case : Optional[int]="relu" , __snake_case : Any=True , __snake_case : List[str]=True , __snake_case : Union[str, Any]=0 , __snake_case : Dict=1 , **__snake_case : Optional[int] , ): a : Optional[int] = vocab_size a : Dict = d_model a : Union[str, Any] = d_kv a : Dict = d_ff a : Tuple = num_layers a : Dict = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a : int = num_heads a : str = relative_attention_num_buckets a : List[Any] = relative_attention_max_distance a : int = dropout_rate a : Tuple = layer_norm_epsilon a : str = initializer_factor a : List[Any] = feed_forward_proj a : Union[str, Any] = use_cache a : List[str] = self.feed_forward_proj.split('-' ) a : int = act_info[-1] a : Union[str, Any] = act_info[0] == 'gated' if len(__snake_case ) > 1 and act_info[0] != "gated" or len(__snake_case ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a : Optional[Any] = 'gelu_new' super().__init__( pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , **__snake_case , ) class a__( lowerCamelCase__ ): @property def lowercase_ ( self : Optional[int] ): a : Dict = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: a : Dict = 'past_encoder_sequence + sequence' a : Dict = {0: 'batch'} a : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: a : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'} a : List[str] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction='inputs' ) return common_inputs @property def lowercase_ ( self : List[Any] ): return 13
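# Illustrative sketch of the `attribute_map` above (assumes `transformers`
# is installed): generic code can read `hidden_size`/`num_hidden_layers`
# even though T5 stores them as `d_model`/`num_layers`.
from transformers import T5Config

config = T5Config(d_model=256, num_layers=4, num_heads=8)
assert config.hidden_size == 256       # alias for `d_model`
assert config.num_hidden_layers == 4   # alias for `num_layers`
assert config.num_decoder_layers == 4  # defaults to `num_layers` (symmetry)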
297
1
'''simple docstring''' import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class a__( lowerCamelCase__ , lowerCamelCase__ ): lowercase__ = 1 @register_to_config def __init__( self : Any , __snake_case : int = 10_00 , __snake_case : Optional[Union[np.ndarray, List[float]]] = None ): # set `betas`, `alphas`, `timesteps` self.set_timesteps(__snake_case ) # standard deviation of the initial noise distribution a : Any = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. a : Union[str, Any] = 4 # running values a : Optional[Any] = [] def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : Union[str, torch.device] = None ): a : Dict = num_inference_steps a : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] a : str = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: a : Tuple = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: a : Dict = torch.sin(steps * math.pi / 2 ) ** 2 a : int = (1.0 - self.betas**2) ** 0.5 a : str = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] a : Any = timesteps.to(__snake_case ) a : str = [] def lowercase_ ( self : List[str] , __snake_case : torch.FloatTensor , __snake_case : int , __snake_case : torch.FloatTensor , __snake_case : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) a : str = (self.timesteps == timestep).nonzero().item() a : Optional[int] = timestep_index + 1 a : Optional[int] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(__snake_case ) if len(self.ets ) == 1: a : List[str] = self.ets[-1] elif len(self.ets ) == 2: a : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: a : int = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: a : Union[str, Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) a : List[str] = self._get_prev_sample(__snake_case , __snake_case , __snake_case , __snake_case ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__snake_case ) def lowercase_ ( self : int , __snake_case : torch.FloatTensor , *__snake_case : Union[str, Any] , **__snake_case : Any ): return sample def lowercase_ ( self : Any , __snake_case : Any , __snake_case : Dict , __snake_case : Any , __snake_case : Any ): a : Union[str, Any] = self.alphas[timestep_index] a : Union[str, Any] = self.betas[timestep_index] a : Union[str, Any] = self.alphas[prev_timestep_index] a : str = self.betas[prev_timestep_index] a : Optional[Any] = (sample - sigma * ets) / max(__snake_case , 1e-8 ) a : List[str] = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self : List[Any] ): return self.config.num_train_timesteps
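# Numerical sketch of the timestep construction in `set_timesteps` above:
# sigma = sin(t * pi / 2) ** 2 and alpha = sqrt(1 - sigma ** 2) land on the
# unit circle, so the stored timestep is just the angle atan2(sigma, alpha)
# rescaled to [0, 1].
import math

import torch

steps = torch.linspace(1, 0, 5 + 1)[:-1]       # same construction as set_timesteps
steps = torch.cat([steps, torch.tensor([0.0])])
sigmas = torch.sin(steps * math.pi / 2) ** 2   # the scheduler's `betas`
alphas = (1.0 - sigmas**2) ** 0.5              # the scheduler's `alphas`
assert torch.allclose(alphas**2 + sigmas**2, torch.ones_like(sigmas))
timesteps = (torch.atan2(sigmas, alphas) / math.pi * 2)[:-1]
print(timesteps)  # strictly decreasing values from 1.0 toward 0.0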
297
'''simple docstring'''

from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """
    Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
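# Worked example for the formula above, f = 1 / (2 * pi * sqrt(L * C)):
# a 10 mH inductor with a 100 nF capacitor gives L * C = 1e-9, so
# f = 1 / (2 * pi * sqrt(1e-9)) ≈ 5032.9 Hz. Assumes the function defined here.
label, frequency = resonant_frequency(inductance=10e-3, capacitance=100e-9)
print(label, round(frequency, 1), "Hz")  # Resonant frequency 5032.9 Hz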
297
1
'''simple docstring'''

from __future__ import annotations

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the reversible numbers of the given length, filling `digits` from
    both ends and tracking the carry parity in `remainder`.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
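# Brute-force cross-check of the digit-DP above. Project Euler 145 states
# there are exactly 120 reversible numbers below one thousand, which the
# direct definition reproduces:
def is_reversible(num: int) -> bool:
    if num % 10 == 0:  # the reversed number would have a leading zero
        return False
    total = num + int(str(num)[::-1])
    return all(int(digit) % 2 == 1 for digit in str(total))


assert sum(is_reversible(num) for num in range(1, 1000)) == 120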
297
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase: Any = logging.get_logger(__name__) lowerCAmelCase: Any = {'vocab_file': 'vocab.txt'} lowerCAmelCase: List[Any] = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCAmelCase: str = { 'openbmb/cpm-ant-10b': 1_0_2_4, } def lowerCamelCase__ ( _A ): a : Union[str, Any] = collections.OrderedDict() with open(_A , 'r' , encoding='utf-8' ) as reader: a : int = reader.readlines() for index, token in enumerate(_A ): a : int = token.rstrip('\n' ) a : List[Any] = index return vocab class a__( lowerCamelCase__ ): def __init__( self : Dict , __snake_case : Any , __snake_case : Dict="<unk>" , __snake_case : str=2_00 ): a : List[Any] = vocab a : Any = unk_token a : List[str] = max_input_chars_per_word def lowercase_ ( self : Optional[int] , __snake_case : Union[str, Any] ): a : Optional[Any] = list(__snake_case ) if len(__snake_case ) > self.max_input_chars_per_word: return [self.unk_token] a : Any = 0 a : Optional[Any] = [] while start < len(__snake_case ): a : Optional[int] = len(__snake_case ) a : str = None while start < end: a : Optional[Any] = ''.join(chars[start:end] ) if substr in self.vocab: a : List[str] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__snake_case ) a : List[str] = end return sub_tokens class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["""input_ids""", """attention_mask"""] lowercase__ = False def __init__( self : Any , __snake_case : str , __snake_case : Tuple="<d>" , __snake_case : List[str]="</d>" , __snake_case : Dict="<s>" , __snake_case : List[Any]="</s>" , __snake_case : int="<pad>" , __snake_case : Any="<unk>" , __snake_case : List[str]="</n>" , __snake_case : int="</_>" , __snake_case : Optional[Any]="left" , **__snake_case : Dict , ): requires_backends(self , ['jieba'] ) super().__init__( bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , ) a : Union[str, Any] = bod_token a : Any = eod_token a : List[str] = load_vocab(__snake_case ) a : Optional[int] = self.encoder[space_token] a : str = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] a : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) a : Tuple = {v: k for k, v in self.encoder.items()} a : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowercase_ ( self : Optional[int] ): return self.encoder[self.bod_token] @property def lowercase_ ( self : Dict ): return self.encoder[self.eod_token] @property def lowercase_ ( self : Any ): return self.encoder["\n"] @property def lowercase_ ( self : Tuple ): return len(self.encoder ) def lowercase_ ( self : str ): return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ): a : List[str] = [] for x in jieba.cut(__snake_case , cut_all=__snake_case ): 
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) ) return output_tokens def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , **__snake_case : Optional[Any] ): a : Optional[int] = [i for i in token_ids if i >= 0] a : Any = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__snake_case , **__snake_case ) def lowercase_ ( self : Optional[int] , __snake_case : int ): return token in self.encoder def lowercase_ ( self : int , __snake_case : List[str] ): return "".join(__snake_case ) def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] ): return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) ) def lowercase_ ( self : Tuple , __snake_case : List[str] ): return self.decoder.get(__snake_case , self.unk_token ) def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ): if os.path.isdir(__snake_case ): a : Optional[int] = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: a : int = (filename_prefix + '-' if filename_prefix else '') + save_directory a : Any = 0 if " " in self.encoder: a : Union[str, Any] = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: a : Tuple = self.encoder['\n'] del self.encoder["\n"] a : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) with open(__snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' ) a : List[Any] = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def lowercase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : List[int] = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowercase_ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) if token_ids_a is not None: return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) return [1] + ([0] * len(__snake_case ))
297
1
'''simple docstring'''

from math import pi


def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
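# Worked example: a 90 degree arc of a circle with radius 10 is a quarter of
# the circumference, 2 * pi * 10 / 4 = 5 * pi ≈ 15.708.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-12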
297
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class a__( unittest.TestCase ): @slow def lowercase_ ( self : List[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[int] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Any = TFAutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) 
self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Any ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[int] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : str = AutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , 
__snake_case ) a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def lowercase_ ( self : Tuple ): a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) def lowercase_ ( self : Any ): a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
297
1
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel lowerCAmelCase: Optional[int] = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 4_8_0_0_0, 'sample_size': 6_5_5_3_6, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 4_8_0_0_0, 'sample_size': 6_5_5_3_6, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 4_8_0_0_0, 'sample_size': 1_3_1_0_7_2, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 1_6_0_0_0, 'sample_size': 6_5_5_3_6, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 1_6_0_0_0, 'sample_size': 6_5_5_3_6, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 1_6_0_0_0, 'sample_size': 6_5_5_3_6, }, } def lowerCamelCase__ ( _A , _A ): return torch.atana(_A , _A ) / math.pi * 2 def lowerCamelCase__ ( _A ): a : int = torch.sin(t * math.pi / 2 ) ** 2 a : Dict = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_A , _A ) class a__( lowerCamelCase__ ): pass class a__( nn.Module ): def __init__( self : str , __snake_case : Optional[Any] ): super().__init__() a : Any = DiffusionAttnUnetaD(__snake_case , n_attn_layers=4 ) a : Any = deepcopy(self.diffusion ) a : Union[str, Any] = torch.quasirandom.SobolEngine(1 , scramble=__snake_case ) def lowerCamelCase__ ( _A ): a : List[str] = MODELS_MAP[model_name]['url'] os.system(f"""wget {url} ./""" ) return f"""./{model_name}.ckpt""" lowerCAmelCase: int = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } lowerCAmelCase: Tuple = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } lowerCAmelCase: Any = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } lowerCAmelCase: int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } lowerCAmelCase: Optional[int] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } lowerCAmelCase: str = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def lowerCamelCase__ ( _A ): if name.startswith('skip' ): return name.replace('skip' , RES_CONV_MAP['skip'] ) # name has to be of format main.{digit} if not name.startswith('main.' ): raise ValueError(f"""ResConvBlock error with {name}""" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def lowerCamelCase__ ( _A ): for key, value in ATTN_MAP.items(): if name.startswith(_A ) and not isinstance(_A , _A ): return name.replace(_A , _A ) elif name.startswith(_A ): return [name.replace(_A , _A ) for v in value] raise ValueError(f"""Attn error with {name}""" ) def lowerCamelCase__ ( _A , _A=13 ): a : Optional[int] = input_string if string.split('.' 
)[0] == "timestep_embed": return string.replace('timestep_embed' , 'time_proj' ) a : int = 0 if string.startswith('net.3.' ): depth += 1 a : Optional[Any] = string[6:] elif string.startswith('net.' ): a : Optional[int] = string[4:] while string.startswith('main.7.' ): depth += 1 a : List[str] = string[7:] if string.startswith('main.' ): a : Optional[Any] = string[5:] # mid block if string[:2].isdigit(): a : Tuple = string[:2] a : Any = string[2:] else: a : Any = string[0] a : Optional[int] = string[1:] if depth == max_depth: a : int = MID_NUM_TO_LAYER[layer_num] a : List[Any] = 'mid_block' elif depth > 0 and int(_A ) < 7: a : int = DOWN_NUM_TO_LAYER[layer_num] a : Dict = f"""down_blocks.{depth}""" elif depth > 0 and int(_A ) > 7: a : int = UP_NUM_TO_LAYER[layer_num] a : int = f"""up_blocks.{max_depth - depth - 1}""" elif depth == 0: a : Dict = DEPTH_0_TO_LAYER[layer_num] a : str = f"""up_blocks.{max_depth - 1}""" if int(_A ) > 3 else 'down_blocks.0' if not string_left.startswith('.' ): raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" ) a : List[Any] = string_left[1:] if "resnets" in new_layer: a : List[str] = convert_resconv_naming(_A ) elif "attentions" in new_layer: a : List[str] = convert_attn_naming(_A ) a : Any = new_string_left if not isinstance(_A , _A ): a : Union[str, Any] = prefix + '.' + new_layer + '.' + string_left else: a : Any = [prefix + '.' + new_layer + '.' + s for s in string_left] return new_string def lowerCamelCase__ ( _A ): a : List[str] = {} for k, v in state_dict.items(): if k.endswith('kernel' ): # up- and downsample layers, don't have trainable weights continue a : List[str] = rename(_A ) # check if we need to transform from Conv => Linear for attention if isinstance(_A , _A ): a : str = transform_conv_attns(_A , _A , _A ) else: a : List[Any] = v return new_state_dict def lowerCamelCase__ ( _A , _A , _A ): if len(_A ) == 1: if len(v.shape ) == 3: # weight a : Union[str, Any] = v[:, :, 0] else: # bias a : int = v else: # qkv matrices a : Any = v.shape[0] a : Union[str, Any] = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: a : Any = v[i * single_shape : (i + 1) * single_shape, :, 0] else: a : str = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def lowerCamelCase__ ( _A ): a : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) a : Union[str, Any] = args.model_path.split('/' )[-1].split('.' 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}""" a : Dict = download(_A ) a : Any = MODELS_MAP[model_name]['sample_rate'] a : List[Any] = MODELS_MAP[model_name]['sample_size'] a : Tuple = Object() a : Any = sample_size a : Optional[Any] = sample_rate a : Optional[Any] = 0 a : List[Any] = UNetaDModel(sample_size=_A , sample_rate=_A ) a : Optional[Any] = diffusers_model.state_dict() a : Optional[Any] = DiffusionUncond(_A ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_A )['state_dict'] ) a : List[str] = orig_model.diffusion_ema.eval() a : Dict = orig_model.state_dict() a : int = rename_orig_weights(_A ) a : Union[str, Any] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) a : Union[str, Any] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_A ) == 0, f"""Problem with {renamed_minus_diffusers}""" assert all(k.endswith('kernel' ) for k in list(_A ) ), f"""Problem with {diffusers_minus_renamed}""" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}""" if key == "time_proj.weight": a : List[str] = value.squeeze() a : Any = value diffusers_model.load_state_dict(_A ) a : Dict = 100 a : Optional[Any] = 33 a : List[Any] = IPNDMScheduler(num_train_timesteps=_A ) a : List[str] = torch.manual_seed(_A ) a : int = torch.randn([1, 2, config.sample_size] , generator=_A ).to(_A ) a : Union[str, Any] = torch.linspace(1 , 0 , steps + 1 , device=_A )[:-1] a : Dict = get_crash_schedule(_A ) a : Tuple = DanceDiffusionPipeline(unet=_A , scheduler=_A ) a : Dict = torch.manual_seed(33 ) a : Tuple = pipe(num_inference_steps=_A , generator=_A ).audios a : Dict = sampling.iplms_sample(_A , _A , _A , {} ) a : Dict = generated.clamp(-1 , 1 ) a : Optional[Any] = (generated - audio).abs().sum() a : Tuple = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('Diff sum' , _A ) print('Diff max' , _A ) assert diff_max < 1E-3, f"""Diff max: {diff_max} is too much :-/""" print(f"""Conversion for {model_name} successful!""" ) if __name__ == "__main__": lowerCAmelCase: Any = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') lowerCAmelCase: str = parser.parse_args() main(args)
297
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase: List[Any] = logging.get_logger(__name__) lowerCAmelCase: List[Any] = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class a__( lowerCamelCase__ ): lowercase__ = """roberta""" def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : int=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Dict=12 , __snake_case : Tuple=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : str=5_12 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : int=1e-1_2 , __snake_case : str=1 , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=None , **__snake_case : str , ): super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case ) a : List[str] = vocab_size a : str = hidden_size a : Tuple = num_hidden_layers a : Dict = num_attention_heads a : List[Any] = hidden_act a : str = intermediate_size a : Union[str, Any] = hidden_dropout_prob a : Optional[Any] = attention_probs_dropout_prob a : Any = max_position_embeddings a : Optional[int] = type_vocab_size a : str = initializer_range a : List[Any] = layer_norm_eps a : Optional[int] = position_embedding_type a : Dict = use_cache a : Any = classifier_dropout class a__( lowerCamelCase__ ): @property def lowercase_ ( self : int ): if self.task == "multiple-choice": a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a : str = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
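# Illustrative sketch of the ONNX config above (assumes `transformers` is
# installed; the class path and task string are the library's, but verify
# them against your installed version): axis 1 is exported as a dynamic
# "sequence" axis, gaining an extra "choice" axis for multiple-choice tasks.
from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig(), task="multiple-choice")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'})])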
297
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
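# Minimal sketch of the lazy-module idea used above (a simplified stand-in,
# not the real `_LazyModule`): submodules are imported only when one of their
# exported names is first accessed.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, symbol: str):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache so the import runs only once
        return value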
297
'''simple docstring'''
def equation(x):
    # Function whose root we search for: f(x) = 10 - x^2, with roots at +/- sqrt(10).
    return 10 - x * x


def bisection(a, b):
    # Bolzano's theorem: a sign change of f on [a, b] guarantees a root in between.
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
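A quick sanity check for the bisection routine above: f(x) = 10 - x^2 changes sign exactly once on [-2, 5], and each iteration halves the bracketing interval, so the loop terminates within 0.01 of sqrt(10) ~ 3.1623. A minimal check, assuming the definitions above are in scope:

import math

root = bisection(-2, 5)
assert math.isclose(root, math.sqrt(10), abs_tol=0.01)  # final interval width < 0.01
print(f"root of 10 - x^2 on [-2, 5]: {root:.4f}")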
297
1
'''simple docstring''' from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run lowerCAmelCase: Optional[Any] = True except (ImportError, AttributeError): lowerCAmelCase: Any = object def lowerCamelCase__ ( *_A , **_A ): pass lowerCAmelCase: Optional[int] = False lowerCAmelCase: Optional[int] = logging.get_logger('transformers-cli/serving') def lowerCamelCase__ ( _A ): a : Tuple = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(_A , args.host , args.port , args.workers ) class a__( lowerCamelCase__ ): lowercase__ = 42 class a__( lowerCamelCase__ ): lowercase__ = 42 lowercase__ = 42 class a__( lowerCamelCase__ ): lowercase__ = 42 class a__( lowerCamelCase__ ): lowercase__ = 42 class a__( lowerCamelCase__ ): @staticmethod def lowercase_ ( __snake_case : ArgumentParser ): a : str = parser.add_parser( 'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' ) serve_parser.add_argument( '--task' , type=__snake_case , choices=get_supported_tasks() , help='The task to run the pipeline on' , ) serve_parser.add_argument('--host' , type=__snake_case , default='localhost' , help='Interface the server will listen on.' ) serve_parser.add_argument('--port' , type=__snake_case , default=88_88 , help='Port the serving will listen to.' ) serve_parser.add_argument('--workers' , type=__snake_case , default=1 , help='Number of http workers' ) serve_parser.add_argument('--model' , type=__snake_case , help='Model\'s name or path to stored model.' ) serve_parser.add_argument('--config' , type=__snake_case , help='Model\'s config name or path to stored model.' ) serve_parser.add_argument('--tokenizer' , type=__snake_case , help='Tokenizer name to use.' ) serve_parser.add_argument( '--device' , type=__snake_case , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , ) serve_parser.set_defaults(func=__snake_case ) def __init__( self : Union[str, Any] , __snake_case : Pipeline , __snake_case : str , __snake_case : int , __snake_case : int ): a : Tuple = pipeline a : Optional[Any] = host a : Union[str, Any] = port a : Any = workers if not _serve_dependencies_installed: raise RuntimeError( 'Using serve command requires FastAPI and uvicorn. ' 'Please install transformers with [serving]: pip install "transformers[serving]".' 'Or install FastAPI and uvicorn separately.' 
) else: logger.info(F"""Serving model over {host}:{port}""" ) a : int = FastAPI( routes=[ APIRoute( '/' , self.model_info , response_model=__snake_case , response_class=__snake_case , methods=['GET'] , ), APIRoute( '/tokenize' , self.tokenize , response_model=__snake_case , response_class=__snake_case , methods=['POST'] , ), APIRoute( '/detokenize' , self.detokenize , response_model=__snake_case , response_class=__snake_case , methods=['POST'] , ), APIRoute( '/forward' , self.forward , response_model=__snake_case , response_class=__snake_case , methods=['POST'] , ), ] , timeout=6_00 , ) def lowercase_ ( self : Optional[Any] ): run(self._app , host=self.host , port=self.port , workers=self.workers ) def lowercase_ ( self : Optional[int] ): return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def lowercase_ ( self : Dict , __snake_case : str = Body(__snake_case , embed=__snake_case ) , __snake_case : bool = Body(__snake_case , embed=__snake_case ) ): try: a : Dict = self._pipeline.tokenizer.tokenize(__snake_case ) if return_ids: a : Any = self._pipeline.tokenizer.convert_tokens_to_ids(__snake_case ) return ServeTokenizeResult(tokens=__snake_case , tokens_ids=__snake_case ) else: return ServeTokenizeResult(tokens=__snake_case ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'model': '', 'error': str(__snake_case )} ) def lowercase_ ( self : int , __snake_case : List[int] = Body(__snake_case , embed=__snake_case ) , __snake_case : bool = Body(__snake_case , embed=__snake_case ) , __snake_case : bool = Body(__snake_case , embed=__snake_case ) , ): try: a : Any = self._pipeline.tokenizer.decode(__snake_case , __snake_case , __snake_case ) return ServeDeTokenizeResult(model='' , text=__snake_case ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'model': '', 'error': str(__snake_case )} ) async def lowercase_ ( self : str , __snake_case : List[str]=Body(__snake_case , embed=__snake_case ) ): # Check we don't have empty string if len(__snake_case ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model a : Tuple = self._pipeline(__snake_case ) return ServeForwardResult(output=__snake_case ) except Exception as e: raise HTTPException(5_00 , {'error': str(__snake_case )} )
297
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class a__: def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ): a : Tuple = parent a : List[str] = batch_size a : Optional[Any] = seq_length a : Tuple = is_training a : Optional[Any] = use_input_mask a : List[Any] = use_token_type_ids a : List[Any] = use_labels a : int = vocab_size a : Union[str, Any] = hidden_size a : Any = num_hidden_layers a : List[str] = num_attention_heads a : int = intermediate_size a : str = hidden_act a : Tuple = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : List[str] = max_position_embeddings a : Any = type_vocab_size a : List[str] = type_sequence_label_size a : Union[str, Any] = initializer_range a : Optional[int] = num_labels a : Optional[Any] = num_choices a : Optional[int] = scope def lowercase_ ( self : List[Any] ): a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a : Dict = None if self.use_input_mask: a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a : Optional[Any] = None a : Optional[int] = None a : Dict = None if self.use_labels: a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) a : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : List[Any] ): a : Any = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ): a : Tuple = EsmForProteinFolding(config=__snake_case ).float() model.to(__snake_case ) model.eval() a : Dict = model(__snake_case , attention_mask=__snake_case ) a : Union[str, Any] = 
model(__snake_case ) a : List[Any] = model(__snake_case ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def lowercase_ ( self : Optional[Any] ): a : Tuple = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) : Optional[Any] = config_and_inputs a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = False lowercase__ = (EsmForProteinFolding,) if is_torch_available() else () lowercase__ = () lowercase__ = {} if is_torch_available() else {} lowercase__ = False def lowercase_ ( self : int ): a : Tuple = EsmFoldModelTester(self ) a : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def lowercase_ ( self : List[str] ): self.config_tester.run_common_tests() def lowercase_ ( self : Union[str, Any] ): a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) @unittest.skip('Does not support attention outputs' ) def lowercase_ ( self : str ): pass @unittest.skip def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support passing input embeds!' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold only has one output format.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def lowercase_ ( self : Tuple ): pass @unittest.skip('ESMFold does not support input chunking.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase_ ( self : Union[str, Any] ): pass @require_torch class a__( lowerCamelCase__ ): @slow def lowercase_ ( self : Optional[int] ): a : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() a : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) a : Any = model(__snake_case )['positions'] a : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) )
297
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class a__( unittest.TestCase ): def lowercase_ ( self : int ): a : Union[str, Any] = tempfile.mkdtemp() # fmt: off a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) a : Union[str, Any] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } a : List[str] = os.path.join(self.tmpdirname , __snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__snake_case , __snake_case ) def lowercase_ ( self : Optional[Any] , **__snake_case : List[str] ): return BertTokenizer.from_pretrained(self.tmpdirname , **__snake_case ) def lowercase_ ( self : List[str] , **__snake_case : List[Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **__snake_case ) def lowercase_ ( self : Dict ): shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Any ): a : Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] a : Optional[int] = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase_ ( self : Any ): a : Dict = self.get_tokenizer() a : Tuple = self.get_image_processor() a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case ) processor.save_pretrained(self.tmpdirname ) a : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __snake_case ) def lowercase_ ( self : Union[str, Any] ): a : str = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) a : Any = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 ) a : Dict = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__snake_case , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __snake_case ) def lowercase_ ( self : Optional[int] ): a : int = self.get_image_processor() a : Union[str, 
Any] = self.get_tokenizer() a : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : List[Any] = self.prepare_image_inputs() a : Optional[Any] = image_processor(__snake_case , return_tensors='np' ) a : List[Any] = processor(images=__snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : List[Any] ): a : List[str] = self.get_image_processor() a : Tuple = self.get_tokenizer() a : List[Any] = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : Tuple = 'lower newer' a : Dict = processor(text=__snake_case ) a : Any = tokenizer(__snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Tuple ): a : List[Any] = self.get_image_processor() a : str = self.get_tokenizer() a : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : Any = 'lower newer' a : List[Any] = self.prepare_image_inputs() a : Tuple = processor(text=__snake_case , images=__snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__snake_case ): processor() def lowercase_ ( self : List[Any] ): a : Dict = self.get_image_processor() a : List[Any] = self.get_tokenizer() a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a : Optional[int] = processor.batch_decode(__snake_case ) a : Dict = tokenizer.batch_decode(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) def lowercase_ ( self : Optional[Any] ): a : Optional[Any] = self.get_image_processor() a : List[Any] = self.get_tokenizer() a : Dict = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case ) a : Optional[int] = 'lower newer' a : Optional[int] = self.prepare_image_inputs() a : List[str] = processor(text=__snake_case , images=__snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
297
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
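One detail worth making explicit in the forward pass above: the blend is applied to residuals, not to raw outputs. Writing delta_i = T(input, cond_i) - input for the transformer assigned to condition i by transformer_index_for_condition = [1, 0], the result is

    output = input + mix_ratio * delta_0 + (1 - mix_ratio) * delta_1

so setting mix_ratio to 1.0 or 0.0 collapses the module to a single conditioned transformer, while 0.5 (the default) averages the two conditions' contributions.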
297
1
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a__( lowerCamelCase__ ): def lowercase_ ( self : List[Any] ): a : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__snake_case , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(__snake_case , 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(__snake_case , 'num_attention_heads' ) ) class a__: def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Union[str, Any]=13 , __snake_case : Dict=32 , __snake_case : Union[str, Any]=2 , __snake_case : Optional[Any]=3 , __snake_case : Any=6_40 , __snake_case : Any=4 , __snake_case : Optional[Any]="silu" , __snake_case : Union[str, Any]=3 , __snake_case : List[str]=32 , __snake_case : int=0.1 , __snake_case : List[str]=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Optional[Any]=0.02 , __snake_case : Dict=True , __snake_case : int=True , __snake_case : int=10 , __snake_case : Tuple=None , ): a : Dict = parent a : Dict = batch_size a : Any = image_size a : List[str] = patch_size a : Any = num_channels a : List[Any] = last_hidden_size a : Optional[int] = num_attention_heads a : List[str] = hidden_act a : Dict = conv_kernel_size a : Union[str, Any] = output_stride a : str = hidden_dropout_prob a : Any = attention_probs_dropout_prob a : Optional[Any] = classifier_dropout_prob a : List[Any] = use_labels a : Tuple = is_training a : Optional[Any] = num_labels a : Union[str, Any] = initializer_range a : str = scope def lowercase_ ( self : Optional[Any] ): a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a : Any = None a : str = None if self.use_labels: a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) a : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a : Optional[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase_ ( self : Union[str, Any] ): return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : int ): a : Tuple = MobileViTModel(config=__snake_case ) model.to(__snake_case ) model.eval() a : List[str] = model(__snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, 
self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase_ ( self : Optional[int] , __snake_case : Dict , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Dict ): a : int = self.num_labels a : Union[str, Any] = MobileViTForImageClassification(__snake_case ) model.to(__snake_case ) model.eval() a : List[Any] = model(__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : str , __snake_case : Dict , __snake_case : Dict , __snake_case : Any , __snake_case : str ): a : int = self.num_labels a : List[str] = MobileViTForSemanticSegmentation(__snake_case ) model.to(__snake_case ) model.eval() a : Tuple = model(__snake_case ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) a : int = model(__snake_case , labels=__snake_case ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase_ ( self : int ): a : List[str] = self.prepare_config_and_inputs() a , a , a , a : Optional[Any] = config_and_inputs a : List[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) lowercase__ = ( { """feature-extraction""": MobileViTModel, """image-classification""": MobileViTForImageClassification, """image-segmentation""": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def lowercase_ ( self : Any ): a : str = MobileViTModelTester(self ) a : Any = MobileViTConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case ) def lowercase_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def lowercase_ ( self : Optional[int] ): pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip(reason='MobileViT does not output attentions' ) def lowercase_ ( self : Optional[int] ): pass def lowercase_ ( self : str ): a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : List[Any] = model_class(__snake_case ) a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a : List[Any] = [*signature.parameters.keys()] a : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase_ ( self : Tuple ): pass def lowercase_ ( self : Optional[int] ): a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) def lowercase_ ( self : Optional[int] ): def check_hidden_states_output(__snake_case : int , __snake_case : Any , __snake_case : Optional[Any] ): a : Optional[Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): a : List[str] = model(**self._prepare_for_class(__snake_case , __snake_case ) ) a : Optional[Any] = outputs.hidden_states a : Dict = 5 self.assertEqual(len(__snake_case ) , __snake_case ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. a : List[str] = 2 for i in range(len(__snake_case ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : Optional[int] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a : Optional[Any] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case ) def lowercase_ ( self : Union[str, Any] ): a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case ) def lowercase_ ( self : Union[str, Any] ): a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__snake_case ) @slow def lowercase_ ( self : Optional[Any] ): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Tuple = MobileViTModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def lowerCamelCase__ ( ): a : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a__( unittest.TestCase ): @cached_property def lowercase_ ( self : Optional[Any] ): return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def lowercase_ ( self : List[str] ): a : Dict = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(__snake_case ) a : Union[str, Any] = self.default_image_processor a : List[str] = prepare_img() a : Any = image_processor(images=__snake_case , return_tensors='pt' ).to(__snake_case ) # forward pass with torch.no_grad(): a : int = model(**__snake_case ) # verify the logits a : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __snake_case ) a : List[str] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) ) @slow def lowercase_ ( self : Tuple ): a : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) a : Optional[Any] = model.to(__snake_case ) a : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) a : int = prepare_img() a : List[str] = image_processor(images=__snake_case , return_tensors='pt' ).to(__snake_case ) # forward pass with torch.no_grad(): a : Any = model(**__snake_case ) a : List[str] = outputs.logits 
# verify the logits a : Optional[Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __snake_case ) a : Any = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] , device=__snake_case , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 ) ) @slow def lowercase_ ( self : List[Any] ): a : List[Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) a : List[str] = model.to(__snake_case ) a : Optional[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) a : Optional[Any] = prepare_img() a : str = image_processor(images=__snake_case , return_tensors='pt' ).to(__snake_case ) # forward pass with torch.no_grad(): a : Tuple = model(**__snake_case ) a : Optional[Any] = outputs.logits.detach().cpu() a : int = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(50, 60)] ) a : Optional[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __snake_case ) a : str = image_processor.post_process_semantic_segmentation(outputs=__snake_case ) a : int = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __snake_case )
297
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
1
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger lowerCAmelCase: str = get_logger(__name__) lowerCAmelCase: Any = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class a__: @add_start_docstrings(__snake_case ) def __call__( self : List[str] , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray ): raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class a__: @add_start_docstrings(__snake_case ) def __call__( self : str , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray ): raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class a__( lowerCamelCase__ ): @add_start_docstrings(__snake_case ) def __call__( self : List[Any] , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int , **__snake_case : Union[str, Any] ): for processor in self: a : List[str] = inspect.signature(processor.__call__ ).parameters if len(__snake_case ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( F"""Make sure that all the required parameters: {list(function_args.keys() )} for """ F"""{processor.__class__} are passed to the logits processor.""" ) a : Optional[Any] = processor(__snake_case , __snake_case , __snake_case , **__snake_case ) else: a : Tuple = processor(__snake_case , __snake_case , __snake_case ) return scores class a__( lowerCamelCase__ ): def __init__( self : Optional[int] , __snake_case : float ): if not isinstance(__snake_case , __snake_case ) or not (temperature > 0): raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" ) a : Dict = temperature def __call__( self : Optional[int] , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int ): a : List[Any] = scores / self.temperature return scores class a__( lowerCamelCase__ ): def __init__( self : Dict , __snake_case : float , __snake_case : float = -float('Inf' ) , __snake_case : int = 1 ): if not isinstance(__snake_case , __snake_case ) or (top_p < 0 or top_p > 1.0): raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" ) if not isinstance(__snake_case , __snake_case ) or (min_tokens_to_keep < 1): raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" ) a : List[str] = top_p a : Optional[int] = filter_value a : Optional[int] = min_tokens_to_keep def __call__( self : Tuple , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int ): a , a : List[str] = lax.top_k(__snake_case , scores.shape[-1] ) a : str = jnp.full_like(__snake_case , 
self.filter_value ) a : Dict = jax.nn.softmax(__snake_case , axis=-1 ).cumsum(axis=-1 ) a : List[Any] = cumulative_probs < self.top_p # include the token that is higher than top_p as well a : Dict = jnp.roll(__snake_case , 1 ) score_mask |= score_mask.at[:, 0].set(__snake_case ) # min tokens to keep a : Optional[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(__snake_case ) a : List[Any] = jnp.where(__snake_case , __snake_case , __snake_case ) a : Optional[int] = jax.lax.sort_key_val(__snake_case , __snake_case )[-1] return next_scores class a__( lowerCamelCase__ ): def __init__( self : int , __snake_case : int , __snake_case : float = -float('Inf' ) , __snake_case : int = 1 ): if not isinstance(__snake_case , __snake_case ) or top_k <= 0: raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" ) a : Tuple = max(__snake_case , __snake_case ) a : Any = filter_value def __call__( self : str , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int ): a , a : Optional[int] = scores.shape a : int = jnp.full(batch_size * vocab_size , self.filter_value ) a : List[Any] = min(self.top_k , scores.shape[-1] ) # Safety check a , a : Optional[Any] = lax.top_k(__snake_case , __snake_case ) a : Optional[Any] = jnp.broadcast_to((jnp.arange(__snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() a : int = topk_scores.flatten() a : Union[str, Any] = topk_indices.flatten() + shift a : List[str] = next_scores_flat.at[topk_indices_flat].set(__snake_case ) a : Dict = next_scores_flat.reshape(__snake_case , __snake_case ) return next_scores class a__( lowerCamelCase__ ): def __init__( self : Union[str, Any] , __snake_case : int ): a : Any = bos_token_id def __call__( self : Optional[Any] , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int ): a : Tuple = jnp.full(scores.shape , -float('inf' ) ) a : Union[str, Any] = 1 - jnp.bool_(cur_len - 1 ) a : List[Any] = jnp.where(__snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , __snake_case ) return scores class a__( lowerCamelCase__ ): def __init__( self : Optional[int] , __snake_case : int , __snake_case : int ): a : Optional[int] = max_length a : Optional[Any] = eos_token_id def __call__( self : List[Any] , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int ): a : List[Any] = jnp.full(scores.shape , -float('inf' ) ) a : Optional[int] = 1 - jnp.bool_(cur_len - self.max_length + 1 ) a : List[str] = jnp.where(__snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , __snake_case ) return scores class a__( lowerCamelCase__ ): def __init__( self : List[Any] , __snake_case : int , __snake_case : int ): if not isinstance(__snake_case , __snake_case ) or min_length < 0: raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" ) if not isinstance(__snake_case , __snake_case ) or eos_token_id < 0: raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" ) a : str = min_length a : Dict = eos_token_id def __call__( self : Optional[int] , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int ): # create boolean flag to decide if min length penalty should be applied a : Dict = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) a : str = jnp.where(__snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , __snake_case ) return scores class a__( lowerCamelCase__ ): def __init__( self : List[Any] , __snake_case : List[str] , __snake_case : Tuple ): a : 
Dict = list(__snake_case ) a : str = begin_index def __call__( self : int , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int ): a : Tuple = 1 - jnp.bool_(cur_len - self.begin_index ) a : Optional[Any] = jnp.where(__snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , __snake_case ) return scores class a__( lowerCamelCase__ ): def __init__( self : Union[str, Any] , __snake_case : list ): a : Tuple = list(__snake_case ) def __call__( self : Optional[Any] , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int ): a : Tuple = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class a__( lowerCamelCase__ ): def __init__( self : List[Any] , __snake_case : Any ): a : int = dict(__snake_case ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. a : Optional[int] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: a : str = force_token_array.at[index].set(__snake_case ) a : List[str] = jnp.intaa(__snake_case ) def __call__( self : List[str] , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : int ): def _force_token(__snake_case : str ): a : int = scores.shape[0] a : str = self.force_token_array[generation_idx] a : int = jnp.ones_like(__snake_case , dtype=scores.dtype ) * -float('inf' ) a : str = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) a : List[Any] = lax.dynamic_update_slice(__snake_case , __snake_case , (0, current_token) ) return new_scores a : List[Any] = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__snake_case ) , lambda: scores , ) , ) return scores class a__( lowerCamelCase__ ): def __init__( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[Any] ): a : Dict = generate_config.eos_token_id a : Union[str, Any] = generate_config.no_timestamps_token_id a : Any = generate_config.no_timestamps_token_id + 1 a : Tuple = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__snake_case , 'max_initial_timestamp_index' ): a : Any = generate_config.max_initial_timestamp_index else: a : Optional[int] = model_config.vocab_size if self.max_initial_timestamp_index is None: a : Optional[Any] = model_config.vocab_size def __call__( self : Dict , __snake_case : str , __snake_case : Dict , __snake_case : List[Any] ): # suppress <|notimestamps|> which is handled by without_timestamps a : Dict = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(__snake_case : Union[str, Any] , __snake_case : Any ): a : List[str] = jnp.where((cur_len - self.begin_index) >= 1 , __snake_case , __snake_case ) a : Union[str, Any] = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __snake_case , ) a : Tuple = jnp.where((cur_len - self.begin_index) < 2 , __snake_case , __snake_case ) a : Union[str, Any] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __snake_case , __snake_case , ) return jnp.where( __snake_case , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin 
:].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , __snake_case , ) a : Optional[int] = jax.vmap(__snake_case )(__snake_case , __snake_case ) a : Any = jnp.where(cur_len == self.begin_index , __snake_case , __snake_case ) a : Optional[int] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __snake_case , ) a : Union[str, Any] = self.timestamp_begin + self.max_initial_timestamp_index a : Any = jnp.where( __snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , __snake_case , ) # if sum of probability over timestamps is above any other token, sample timestamp a : Optional[int] = jax.nn.log_softmax(__snake_case , axis=-1 ) def handle_cumulative_probs(__snake_case : Any , __snake_case : Optional[int] ): a : Optional[Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) a : List[str] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , __snake_case , ) a : str = jax.vmap(__snake_case )(__snake_case , __snake_case ) return scores
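The top-p (nucleus) warper defined earlier in this file keeps the smallest set of tokens whose cumulative probability reaches `top_p`, including the token that crosses the threshold. The same masking logic as a standalone `jax.numpy` sketch, with names of my own choosing rather than the library's:

import jax
import jax.numpy as jnp


def nucleus_mask(scores, top_p=0.9, filter_value=-jnp.inf):
    # Sort descending and accumulate probabilities over the sorted vocabulary.
    sorted_scores, sorted_idx = jax.lax.top_k(scores, scores.shape[-1])
    cum_probs = jax.nn.softmax(sorted_scores, axis=-1).cumsum(axis=-1)
    # Keep tokens whose preceding cumulative mass is below top_p (shift right by one),
    # which also keeps the token that crosses the threshold; always keep the top token.
    keep = jnp.roll(cum_probs < top_p, 1, axis=-1).at[..., 0].set(True)
    filtered = jnp.where(keep, sorted_scores, filter_value)
    # Scatter the filtered scores back to the original vocabulary order.
    inverse_perm = jnp.argsort(sorted_idx, axis=-1)
    return jnp.take_along_axis(filtered, inverse_perm, axis=-1)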
297
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mgp_str'] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
1
'''simple docstring'''
def topological_sort(graph):
    # Kahn's algorithm: repeatedly remove vertices whose indegree has dropped to 0.
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Start from the vertices with no incoming edges.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
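Kahn's algorithm signals a cycle when the number of processed vertices falls short of len(graph): vertices on a cycle never reach indegree 0, so they are never enqueued. A hypothetical input exercising that branch (not part of the original file):

# 1 -> 2 -> 3 -> 1 forms a cycle; only vertex 0 ever reaches indegree 0.
cyclic_graph = {0: [1], 1: [2], 2: [3], 3: [1]}
topological_sort(cyclic_graph)  # prints 'Cycle exists'

Note also that queue.pop(0) is O(n) on a Python list; collections.deque with popleft() would make each dequeue O(1) without changing the output.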
297
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase: Dict = logging.get_logger(__name__) lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase: List[Any] = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } lowerCAmelCase: str = { 'allenai/led-base-16384': 1_6_3_8_4, } class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Dict="replace" , __snake_case : int="<s>" , __snake_case : Any="</s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : int="<mask>" , __snake_case : int=False , __snake_case : str=True , **__snake_case : Tuple , ): super().__init__( __snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , ) a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : List[Any] = getattr(__snake_case , pre_tok_state.pop('type' ) ) a : Optional[Any] = add_prefix_space a : Optional[Any] = pre_tok_class(**__snake_case ) a : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a : Dict = 'post_processor' a : int = getattr(self.backend_tokenizer , __snake_case , __snake_case ) if tokenizer_component_instance: a : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a : Any = tuple(state['sep'] ) if "cls" in state: a : Any = tuple(state['cls'] ) a : Optional[Any] = False if state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : Any = add_prefix_space a : Optional[Any] = True if state.get('trim_offsets' , __snake_case ) != trim_offsets: a : List[Any] = trim_offsets a : Union[str, Any] = True if changes_to_apply: a : int = getattr(__snake_case , state.pop('type' ) ) a : List[Any] = component_class(**__snake_case ) setattr(self.backend_tokenizer , __snake_case , __snake_case ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def lowercase_ ( self : Dict ): if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self : Dict , __snake_case : List[str] ): a : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value a : Optional[int] = value def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ): a : Dict = kwargs.get('is_split_into_words' , __snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*__snake_case , **__snake_case ) def lowercase_ ( self : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : List[str] ): a : Optional[int] = kwargs.get('is_split_into_words' , __snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._encode_plus(*__snake_case , **__snake_case ) def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ): a : Union[str, Any] = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : int=None ): a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ): a : int = [self.sep_token_id] a : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ): a : Optional[Any] = super()._pad( encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , ) # Load from model defaults if return_attention_mask is None: a : str = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a : Any = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. a : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case ) if needs_to_be_padded: a : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a : Dict = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": a : Union[str, Any] = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
297
1
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__) class a__( lowerCamelCase__ ): lowercase__ = ["""pixel_values"""] def __init__( self : List[str] , __snake_case : bool = True , __snake_case : Optional[Dict[str, int]] = None , __snake_case : PILImageResampling = PILImageResampling.BILINEAR , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : Optional[Any] , ): super().__init__(**__snake_case ) a : Union[str, Any] = size if size is not None else {'shortest_edge': 2_56} a : Optional[Any] = get_size_dict(__snake_case , default_to_square=__snake_case ) a : Optional[int] = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} a : int = get_size_dict(__snake_case , param_name='crop_size' ) a : Optional[int] = do_resize a : List[Any] = size a : Optional[Any] = resample a : Union[str, Any] = do_center_crop a : Tuple = crop_size a : List[Any] = do_rescale a : Union[str, Any] = rescale_factor a : Union[str, Any] = do_normalize a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase_ ( self : Tuple , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ): a : List[Any] = get_size_dict(__snake_case , default_to_square=__snake_case ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) a : Optional[Any] = get_resize_output_image_size(__snake_case , size=size['shortest_edge'] , default_to_square=__snake_case ) return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase_ ( self : str , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ): a : Optional[int] = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(__snake_case , size=(size['height'], size['width']) , data_format=__snake_case , **__snake_case ) def lowercase_ ( self : List[Any] , __snake_case : np.ndarray , __snake_case : float , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Union[str, Any] ): return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase_ ( self : List[Any] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : str , ): return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase_ ( self : int , __snake_case : ImageInput , __snake_case : Optional[bool] = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[float] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__snake_case : List[Any] , ): a : int = do_resize if do_resize is not None else self.do_resize a : Tuple = size if size is not None else self.size a : Dict = get_size_dict(__snake_case , default_to_square=__snake_case ) a : Optional[Any] = resample if resample is not None else self.resample a : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size a : Dict = get_size_dict(__snake_case , param_name='crop_size' ) a : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale a : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor a : List[Any] = do_normalize if do_normalize is not None else self.do_normalize a : List[str] = image_mean if image_mean is not None else self.image_mean a : Any = image_std if image_std is not None else self.image_std a : str = make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
a : List[Any] = [to_numpy_array(__snake_case ) for image in images] if do_resize: a : List[Any] = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images] if do_center_crop: a : List[Any] = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images] if do_rescale: a : str = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images] if do_normalize: a : Union[str, Any] = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images] a : str = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images] a : List[Any] = {'pixel_values': images} return BatchFeature(data=__snake_case , tensor_type=__snake_case ) def lowercase_ ( self : Any , __snake_case : Any , __snake_case : List[Tuple] = None ): a : Optional[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__snake_case ) != len(__snake_case ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(__snake_case ): a : List[str] = target_sizes.numpy() a : List[Any] = [] for idx in range(len(__snake_case ) ): a : Dict = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=__snake_case ) a : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__snake_case ) else: a : Dict = logits.argmax(dim=1 ) a : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
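# A short usage sketch of the image processor defined above (obfuscated here
# as `a__`): shortest-edge resize to 256, 224x224 center crop, 1/255 rescale,
# ImageNet mean/std normalization. The random image stands in for real data.
import numpy as np

processor = a__()
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
batch = processor(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])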
297
"""Histogram stretch (global histogram equalization) for a greyscale image."""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256  # number of grey levels
        self.sk = 0  # running cumulative distribution
        self.k = 0  # total pixel count
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        # Build the lookup table: grey level k maps to round((L - 1) * CDF(k)).
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        # Remap every pixel through the lookup table.
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # dirname (not basename) so the sample image resolves next to this script
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
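# The stretch above is classic histogram equalization: grey level k is sent to
# round((L - 1) * CDF(k)). A vectorized numpy sketch of the same lookup-table
# construction (illustrative only; no OpenCV or matplotlib needed):
import numpy as np


def equalization_lut(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels).astype(np.float64)
    cdf = np.cumsum(hist) / hist.sum()  # cumulative distribution in [0, 1]
    return np.round((levels - 1) * cdf).astype(np.uint8)


# usage: equalized = equalization_lut(img)[img]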
297
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class a__( unittest.TestCase ): lowercase__ = MODEL_FOR_CAUSAL_LM_MAPPING lowercase__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def lowercase_ ( self : Any ): a : Union[str, Any] = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' ) # Using `do_sample=False` to force deterministic output a : Optional[int] = text_generator('This is a test' , do_sample=__snake_case ) self.assertEqual( __snake_case , [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ] , ) a : List[str] = text_generator(['This is a test', 'This is a second test'] ) self.assertEqual( __snake_case , [ [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ], [ { 'generated_text': ( 'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy' ' oscope. oscope. FiliFili@@' ) } ], ] , ) a : List[Any] = text_generator('This is a test' , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case ) self.assertEqual( __snake_case , [ {'generated_token_ids': ANY(__snake_case )}, {'generated_token_ids': ANY(__snake_case )}, ] , ) a : List[Any] = text_generator.model.config.eos_token_id a : Optional[Any] = '<pad>' a : Any = text_generator( ['This is a test', 'This is a second test'] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , ) self.assertEqual( __snake_case , [ [ {'generated_token_ids': ANY(__snake_case )}, {'generated_token_ids': ANY(__snake_case )}, ], [ {'generated_token_ids': ANY(__snake_case )}, {'generated_token_ids': ANY(__snake_case )}, ], ] , ) @require_tf def lowercase_ ( self : Union[str, Any] ): a : Optional[int] = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' ) # Using `do_sample=False` to force deterministic output a : Optional[int] = text_generator('This is a test' , do_sample=__snake_case ) self.assertEqual( __snake_case , [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ] , ) a : Optional[int] = text_generator(['This is a test', 'This is a second test'] , do_sample=__snake_case ) self.assertEqual( __snake_case , [ [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ], [ { 'generated_text': ( 'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes' ' Cannes 閲閲Cannes Cannes Cannes 攵 please,' ) } ], ] , ) def lowercase_ ( self : str , __snake_case : Optional[Any] , __snake_case : int , __snake_case : int ): a : Any = TextGenerationPipeline(model=__snake_case , tokenizer=__snake_case ) return text_generator, ["This is a test", "Another test"] def lowercase_ ( self : Optional[int] ): a : Tuple = 'Hello I believe in' a : int = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) a : List[Any] = text_generator(__snake_case ) self.assertEqual( __snake_case , 
[{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , ) a : Tuple = text_generator(__snake_case , stop_sequence=' fe' ) self.assertEqual(__snake_case , [{'generated_text': 'Hello I believe in fe'}] ) def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[Any] ): a : Optional[int] = text_generator.model a : Dict = text_generator.tokenizer a : Tuple = text_generator('This is a test' ) self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) a : Optional[Any] = text_generator('This is a test' , return_full_text=__snake_case ) self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) a : Dict = pipeline(task='text-generation' , model=__snake_case , tokenizer=__snake_case , return_full_text=__snake_case ) a : int = text_generator('This is a test' ) self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] ) self.assertNotIn('This is a test' , outputs[0]['generated_text'] ) a : Optional[int] = text_generator('This is a test' , return_full_text=__snake_case ) self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] ) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) ) a : List[Any] = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=__snake_case ) self.assertEqual( __snake_case , [ [{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}], [{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}], ] , ) if text_generator.tokenizer.pad_token is not None: a : List[str] = text_generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case ) self.assertEqual( __snake_case , [ [{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}], [{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}], ] , ) with self.assertRaises(__snake_case ): a : Optional[int] = text_generator('test' , return_full_text=__snake_case , return_text=__snake_case ) with self.assertRaises(__snake_case ): a : Tuple = text_generator('test' , return_full_text=__snake_case , return_tensors=__snake_case ) with self.assertRaises(__snake_case ): a : Union[str, Any] = text_generator('test' , return_text=__snake_case , return_tensors=__snake_case ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): a : str = text_generator('' ) self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] ) else: with self.assertRaises((ValueError, AssertionError) ): a : Optional[int] = text_generator('' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
a : List[str] = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM'] if ( tokenizer.model_max_length < 1_00_00 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('This is a test' * 5_00 , max_new_tokens=20 ) a : List[Any] = text_generator('This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__snake_case ): text_generator( 'This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def lowercase_ ( self : Union[str, Any] ): import torch # Classic `model_kwargs` a : Any = pipeline( model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) a : List[str] = pipe('This is a test' ) self.assertEqual( __snake_case , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) a : int = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) a : str = pipe('This is a test' ) self.assertEqual( __snake_case , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 a : int = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) a : int = pipe('This is a test' ) self.assertEqual( __snake_case , [ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] , ) @require_torch @require_torch_gpu def lowercase_ ( self : List[Any] ): import torch a : int = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa ) pipe('This is a test' ) @require_torch @require_accelerate @require_torch_gpu def lowercase_ ( self : List[str] ): import torch a : int = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa ) pipe('This is a test' , do_sample=__snake_case , top_p=0.5 ) def lowercase_ ( self : Optional[int] ): a : Optional[int] = 'Hello world' a : int = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' ) if text_generator.model.framework == "tf": a : List[str] = logging.get_logger('transformers.generation.tf_utils' ) else: a : int = logging.get_logger('transformers.generation.utils' ) a : Any = 'Both `max_new_tokens`' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__snake_case ) as cl: a : Optional[int] = text_generator(__snake_case , max_length=10 , max_new_tokens=1 ) self.assertIn(__snake_case , cl.out ) # The user only 
sets one -> no warning with CaptureLogger(__snake_case ) as cl: a : Tuple = text_generator(__snake_case , max_new_tokens=1 ) self.assertNotIn(__snake_case , cl.out ) with CaptureLogger(__snake_case ) as cl: a : Union[str, Any] = text_generator(__snake_case , max_length=10 ) self.assertNotIn(__snake_case , cl.out )
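# Minimal usage sketch of the pipeline exercised by the tests above, reusing
# the same tiny fixture checkpoint they rely on:
from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
out = generator("Hello I believe in", max_new_tokens=5, do_sample=False)
print(out[0]["generated_text"])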
297
'''simple docstring''' import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class a__: def __init__( self : List[Any] , __snake_case : Union[str, Any] ): if isinstance(__snake_case , __snake_case ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden a : str = deepcopy(__snake_case ) elif os.path.exists(__snake_case ): with io.open(__snake_case , 'r' , encoding='utf-8' ) as f: a : Optional[Any] = json.load(__snake_case ) else: try: a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' ) a : Union[str, Any] = json.loads(__snake_case ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) a : List[str] = config self.set_stage_and_offload() def lowercase_ ( self : List[str] ): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. a : Dict = self.get_value('zero_optimization.stage' , -1 ) # offload a : str = False if self.is_zeroa() or self.is_zeroa(): a : Union[str, Any] = set(['cpu', 'nvme'] ) a : Optional[Any] = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: a : List[str] = True def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ): a : str = self.config # find the config node of interest if it exists a : List[str] = ds_key_long.split('.' ) a : Dict = nodes.pop() for node in nodes: a : List[Any] = config.get(__snake_case ) if config is None: return None, ds_key return config, ds_key def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ): a , a : List[Any] = self.find_config_node(__snake_case ) if config is None: return default return config.get(__snake_case , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ): a : Optional[Any] = self.config # find the config node of interest if it exists a : List[str] = ds_key_long.split('.' 
) for node in nodes: a : str = config a : Dict = config.get(__snake_case ) if config is None: if must_exist: raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ): a : Union[str, Any] = self.get_value(__snake_case ) return False if value is None else bool(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : str ): a : Optional[Any] = self.get_value(__snake_case ) return False if value is None else not bool(__snake_case ) def lowercase_ ( self : Optional[Any] ): return self._stage == 2 def lowercase_ ( self : Union[str, Any] ): return self._stage == 3 def lowercase_ ( self : str ): return self._offload class a__: def __init__( self : Tuple , __snake_case : str ): a : Optional[Any] = engine def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ): # runs backpropagation and handles mixed precision self.engine.backward(__snake_case , **__snake_case ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : List[str] ): super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case ) a : Optional[Any] = hasattr(self.optimizer , 'overflow' ) def lowercase_ ( self : Dict , __snake_case : Dict=None ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def lowercase_ ( self : Optional[Any] ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def lowercase_ ( self : Tuple ): if self.__has_overflow__: return self.optimizer.overflow return False class a__( lowerCamelCase__ ): def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ): super().__init__(__snake_case , __snake_case ) def lowercase_ ( self : Any ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class a__: def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ): a : Optional[Any] = params a : str = lr a : List[str] = weight_decay a : str = kwargs class a__: def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ): a : Union[str, Any] = optimizer a : Any = total_num_steps a : List[str] = warmup_num_steps a : int = kwargs
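# The wrapper above mirrors accelerate's HfDeepSpeedConfig (method names are
# obfuscated in this row). Against the real API, behaviour looks like this
# sketch; the config dict is illustrative only:
from accelerate.utils.deepspeed import HfDeepSpeedConfig

ds_config = {
    "zero_optimization": {
        "stage": 3,
        "offload_param": {"device": "cpu"},
        "offload_optimizer": {"device": "none"},
    }
}
cfg = HfDeepSpeedConfig(ds_config)
print(cfg.get_value("zero_optimization.stage"))  # 3
print(cfg.is_zero3(), cfg.is_offload())  # True True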
297
1
"""Smoke test: report how many CUDA devices this process can see."""
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
297
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowerCAmelCase: int = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class a__( unittest.TestCase ): def lowercase_ ( self : int , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): a : Optional[int] = None a : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) a : List[str] = os.path.abspath('examples' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: a : int = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='main()' if parser_only else 'training_function()' , ): a : List[Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) a : Union[str, Any] = '\n'.join(__snake_case ) if special_strings is not None: for string in special_strings: a : Union[str, Any] = diff.replace(__snake_case , '' ) self.assertEqual(__snake_case , '' ) def lowercase_ ( self : Optional[Any] ): self.one_complete_example('complete_nlp_example.py' , __snake_case ) self.one_complete_example('complete_nlp_example.py' , __snake_case ) def lowercase_ ( self : Any ): a : Dict = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) a : int = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class a__( lowerCamelCase__ ): lowercase__ = False @classmethod def lowercase_ ( cls : Optional[int] ): super().setUpClass() a : List[str] = tempfile.mkdtemp() a : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) a : Optional[int] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def lowercase_ ( cls : Optional[int] ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def lowercase_ ( self : Tuple ): a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def lowercase_ ( self : Dict ): a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 
--output_dir {self.tmpdir} """.split() a : int = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def lowercase_ ( self : Any ): a : Tuple = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} """.split() a : int = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) def lowercase_ ( self : int ): a : Optional[int] = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} """.split() a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): a : Any = torch.cuda.device_count() else: a : str = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) else: self.assertIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) @slow def lowercase_ ( self : Tuple ): a : Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): a : Any = run_command(self._launch_args + testargs , return_stdout=__snake_case ) a : Optional[Any] = re.findall('({.+})' , __snake_case ) a : str = [r for r in results if 'accuracy' in r][-1] a : str = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def lowercase_ ( self : Optional[int] ): a : int = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def lowercase_ ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: a : Optional[Any] = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , 'tracking' ) ) ) def lowercase_ ( self : List[str] ): a : Optional[Any] = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def lowercase_ ( self : int ): a : Optional[Any] = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
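# Outside the test harness above, the same flow is: write a default accelerate
# config once, then launch an example script against it (paths illustrative):
from accelerate.utils import write_basic_config

write_basic_config(save_location="default_config.yml")
# then, from a shell:
#   accelerate launch --config_file default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir outputs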
297
1
"""Prefix-sum array supporting O(1) range-sum queries after O(n) setup."""


class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        # Each entry holds the sum of array[0..i].
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end], both inclusive.

        >>> PrefixSum([1, 2, 3]).get_sum(0, 2)
        6
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to target_sum.

        >>> PrefixSum([1, -2, 3]).contains_sum(2)
        True
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
297
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class a__( lowerCamelCase__ ): def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ): a : Union[str, Any] = tokenizer a : Union[str, Any] = dataset a : Any = len(__snake_case ) if n_tasks is None else n_tasks a : List[str] = n_copies def __iter__( self : str ): a : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() ) a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class a__( lowerCamelCase__ ): def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ): a : Dict = start_length a : Dict = eof_strings a : str = tokenizer def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ): a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__snake_case ) def lowerCamelCase__ ( _A ): a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ): a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_A ) ): with torch.no_grad(): a : Optional[Any] = batch['ids'].shape[-1] a : Optional[Any] = accelerator.unwrap_model(_A ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A ) # each task is generated batch_size times a : Tuple = batch['task_id'].repeat(_A ) a : List[Any] = accelerator.pad_across_processes( _A , dim=1 , pad_index=tokenizer.pad_token_id ) a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) a : List[str] = generated_tokens.cpu().numpy() a : int = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_A , _A ): gen_token_dict[task].append(_A ) a : Any = [[] for _ in range(_A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) code_gens[task].append(remove_last_block(_A ) ) return code_gens def lowerCamelCase__ ( ): # Setup configuration a : Dict = HfArgumentParser(_A ) a : Any = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a : List[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice 
with multiprocessing a : int = 'false' if args.num_workers is None: a : Dict = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a : List[Any] = Accelerator() set_seed(args.seed , device_specific=_A ) # Load model and tokenizer a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) a : str = tokenizer.eos_token a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a : Optional[Any] = { 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ), } # Load evaluation dataset and metric a : Optional[int] = load_dataset('openai_humaneval' ) a : Optional[Any] = load_metric('code_eval' ) a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) a : Optional[Any] = args.n_samples // args.batch_size a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A ) # do not confuse args.batch_size, which is actually the num_return_sequences a : int = DataLoader(_A , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a : int = code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`' ' flag to enable code evaluation.' ) raise exception a , a : int = accelerator.prepare(_A , _A ) a : int = complete_code( _A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , ) if accelerator.is_main_process: a : List[str] = [] for task in tqdm(range(_A ) ): a : int = human_eval['test'][task]['test'] a : int = f"""check({human_eval["test"][task]["entry_point"]})""" references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric a , a : Tuple = code_eval_metric.compute( references=_A , predictions=_A , num_workers=args.num_workers ) print(f"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(_A , _A ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
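# The post-processing the evaluation above depends on: `remove_last_block`
# truncates a completion at the first top-level stop string. A self-contained
# worked sketch of that helper:
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def remove_last_block(string: str) -> str:
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # drop the matched stop string and everything after it
    return "".join(string_list[:-2])


completion = "    return x + 1\n\ndef next_function():"
print(repr(remove_last_block(completion)))  # '    return x + 1\n'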
297
1
"""Solve a*x**2 + b*x + c = 0 with the quadratic formula (complex-safe)."""
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # Return plain floats when the roots are real.
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
297
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def lowerCamelCase__ ( _A , _A , _A ): if isinstance(_A , torch.Tensor ): return image elif isinstance(_A , PIL.Image.Image ): a : Any = [image] if isinstance(image[0] , PIL.Image.Image ): a : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] a : int = np.concatenate(_A , axis=0 ) a : int = np.array(_A ).astype(np.floataa ) / 255.0 a : str = image.transpose(0 , 3 , 1 , 2 ) a : str = 2.0 * image - 1.0 a : Optional[int] = torch.from_numpy(_A ) elif isinstance(image[0] , torch.Tensor ): a : Optional[Any] = torch.cat(_A , dim=0 ) return image def lowerCamelCase__ ( _A , _A , _A , _A=0.9995 ): if not isinstance(_A , np.ndarray ): a : Dict = True a : Optional[Any] = va.device a : Optional[int] = va.cpu().numpy() a : Union[str, Any] = va.cpu().numpy() a : Any = np.sum(va * va / (np.linalg.norm(_A ) * np.linalg.norm(_A )) ) if np.abs(_A ) > DOT_THRESHOLD: a : Any = (1 - t) * va + t * va else: a : Any = np.arccos(_A ) a : Tuple = np.sin(_A ) a : Optional[Any] = theta_a * t a : List[Any] = np.sin(_A ) a : Dict = np.sin(theta_a - theta_t ) / sin_theta_a a : int = sin_theta_t / sin_theta_a a : Any = sa * va + sa * va if inputs_are_torch: a : Dict = torch.from_numpy(_A ).to(_A ) return va def lowerCamelCase__ ( _A , _A ): a : Optional[int] = F.normalize(_A , dim=-1 ) a : str = F.normalize(_A , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def lowerCamelCase__ ( _A , _A ): for param in model.parameters(): a : int = value class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ): super().__init__() self.register_modules( vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , ) a : Optional[Any] = ( feature_extractor.size if isinstance(feature_extractor.size , __snake_case ) else feature_extractor.size['shortest_edge'] ) a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , __snake_case ) set_requires_grad(self.clip_model , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__snake_case ) def 
lowercase_ ( self : Union[str, Any] ): self.enable_attention_slicing(__snake_case ) def lowercase_ ( self : Optional[Any] ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : Tuple ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : int ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : Union[str, Any] ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ): # get the original timestep using init_timestep a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case ) a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 ) a : List[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ): if not isinstance(__snake_case , torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" ) a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case ) if isinstance(__snake_case , __snake_case ): a : Optional[int] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case ) ] a : Optional[Any] = torch.cat(__snake_case , dim=0 ) else: a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : List[str] = 0.18215 * init_latents a : str = init_latents.repeat_interleave(__snake_case , dim=0 ) a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case ) # get latents a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case ) a : int = init_latents return latents def lowercase_ ( self : List[str] , __snake_case : Dict ): a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ): a : List[Any] = self.feature_extractor.preprocess(__snake_case ) a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() a : int = self.clip_model.get_image_features(__snake_case ) a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 ) return image_embeddings_clip @torch.enable_grad() def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ): a : Optional[Any] = latents.detach().requires_grad_() a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, 
DPMSolverMultistepScheduler) ): a : int = self.scheduler.alphas_cumprod[timestep] a : Any = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 a : Tuple = torch.sqrt(__snake_case ) a : str = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , __snake_case ): a : List[Any] = self.scheduler.sigmas[index] a : Optional[int] = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Union[str, Any] = 1 / 0.18215 * sample a : str = self.vae.decode(__snake_case ).sample a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case ) a : List[str] = self.normalize(__snake_case ).to(latents.dtype ) a : List[str] = self.clip_model.get_image_features(__snake_case ) a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0] if isinstance(self.scheduler , __snake_case ): a : List[Any] = latents.detach() + grads * (sigma**2) a : Optional[int] = noise_pred_original else: a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ): if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(__snake_case , torch.Generator ) and batch_size > 1: a : Dict = [generator] + [None] * (batch_size - 1) a : Any = [ ('model', self.coca_model is None), ('tokenizer', self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] a : List[str] = [x[0] for x in coca_is_none if x[1]] a : List[str] = ', '.join(__snake_case ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(__snake_case ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) a : int = self.get_image_description(__snake_case ) if style_prompt is None: if len(__snake_case ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) 
a : Union[str, Any] = self.get_image_description(__snake_case ) # get prompt text embeddings for content and style a : Optional[Any] = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] a : Dict = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] a : Any = slerp(__snake_case , __snake_case , __snake_case ) # duplicate text embeddings for each generation per prompt a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 ) # set timesteps a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) a : Any = {} if accepts_offset: a : Optional[Any] = 1 self.scheduler.set_timesteps(__snake_case , **__snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device ) a : Optional[int] = timesteps[:1].repeat(__snake_case ) # Preprocess image a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case ) a : List[Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : str = preprocess(__snake_case , __snake_case , __snake_case ) a : Union[str, Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case ) if clip_guidance_scale > 0: a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : int = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : List[str] = slerp( __snake_case , __snake_case , __snake_case ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. a : int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: a : Any = content_text_input.input_ids.shape[-1] a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' ) a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a : Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) a : List[str] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to( self.device ) else: a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) a : List[str] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a : Union[str, Any] = {} if accepts_eta: a : List[str] = eta # check if the scheduler accepts generator a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: a : Any = generator with self.progress_bar(total=__snake_case ): for i, t in enumerate(__snake_case ): # expand the latents if we are doing classifier free guidance a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample # perform classifier free guidance if do_classifier_free_guidance: a , a : List[str] = noise_pred.chunk(2 ) a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: a : Optional[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) a , a : Union[str, Any] = self.cond_fn( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # compute the previous noisy sample x_t -> x_t-1 a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Tuple = 1 / 0.18215 * latents a : Optional[int] = self.vae.decode(__snake_case ).sample a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a : str = self.numpy_to_pil(__snake_case ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
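# The pipeline above blends prompt embeddings, latents, and CLIP image
# embeddings with slerp (spherical linear interpolation). A compact reference
# sketch of that helper, mirroring the numpy-backed version defined earlier:
import numpy as np
import torch


def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995) -> torch.Tensor:
    device = v0.device
    a, b = v0.cpu().numpy(), v1.cpu().numpy()
    dot = np.sum(a * b / (np.linalg.norm(a) * np.linalg.norm(b)))
    if np.abs(dot) > dot_threshold:
        out = (1 - t) * a + t * b  # nearly collinear: fall back to lerp
    else:
        theta_0 = np.arccos(dot)  # angle between the two vectors
        theta_t = theta_0 * t
        s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
        s1 = np.sin(theta_t) / np.sin(theta_0)
        out = s0 * a + s1 * b
    return torch.from_numpy(out).to(device)


# endpoints sanity check: slerp(0, v0, v1) == v0 and slerp(1, v0, v1) == v1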
297
1
"""Task template mapping a dataset's columns onto the audio-classification task."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # Copy the template and swap in the dataset's concrete ClassLabel;
        # the dataclass is frozen, so write through __dict__.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
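# Usage sketch for the template above: align the schema with a dataset's real
# ClassLabel so "labels" carries the dataset's class names (data illustrative):
from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["yes", "no"])})
aligned = AudioClassification().align_with_features(features)
print(aligned.label_schema["labels"])  # ClassLabel(names=['yes', 'no'])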
297
"""Hubble parameter H(z) for an FLRW universe via the Friedmann equation."""


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        # E(z)^2 = O_r*(1+z)^4 + O_m*(1+z)^3 + O_k*(1+z)^2 + O_lambda
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
297
1
'''simple docstring''' import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class a__( unittest.TestCase ): lowercase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : str ): a : str = hf_hub_download( repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) a : Union[str, Any] = VideoClassificationPipeline(model=__snake_case , image_processor=__snake_case , top_k=2 ) a : Optional[Any] = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def lowercase_ ( self : List[str] , __snake_case : List[str] , __snake_case : Tuple ): for example in examples: a : Optional[Any] = video_classifier(__snake_case ) self.assertEqual( __snake_case , [ {'score': ANY(__snake_case ), 'label': ANY(__snake_case )}, {'score': ANY(__snake_case ), 'label': ANY(__snake_case )}, ] , ) @require_torch def lowercase_ ( self : Any ): a : Union[str, Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' a : Tuple = VideoMAEFeatureExtractor( size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} ) a : Dict = pipeline( 'video-classification' , model=__snake_case , feature_extractor=__snake_case , frame_sampling_rate=4 ) a : Union[str, Any] = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) a : Optional[Any] = video_classifier(__snake_case , top_k=2 ) self.assertEqual( nested_simplify(__snake_case , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , ) a : int = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(__snake_case , decimals=4 ) , [ [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}], [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}], ] , ) @require_tf def lowercase_ ( self : Tuple ): pass
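# Hedged usage sketch of the pipeline under test above, reusing the same tiny
# fixture model and demo video the test downloads:
from huggingface_hub import hf_hub_download
from transformers import pipeline

video = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
    top_k=2,
)
print(classifier(video))  # two {"score": ..., "label": ...} dicts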
297
'''simple docstring'''

from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for _ in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for _ in range(0, 4 * size)]
        self.flag = [0 for _ in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True

        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]

        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
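
    # A brute-force cross-check of the lazy range-max queries against a
    # naive scan (randomised example added for illustration):
    import random

    arr = [random.randint(-100, 100) for _ in range(size)]
    checker = SegmentTree(size)
    checker.build(1, 1, size, arr)
    for left in range(1, size + 1):
        for right in range(left, size + 1):
            assert checker.query(1, 1, size, left, right) == max(arr[left - 1 : right])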
297
1
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
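# A short sketch of how this config is typically consumed (assumes an
# installed `transformers`; shown as a comment since this module lives
# inside the library and relies on relative imports):
#
#   from transformers import RobertaConfig, RobertaModel
#
#   config = RobertaConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
#   model = RobertaModel(config)  # randomly initialised weights with this geometry
#   assert model.config.hidden_size == 128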
297
'''simple docstring'''


def add(first: int, second: int) -> int:
    while second != 0:
        # collect the carry bits, add without carrying, then shift the carry in
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
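
    # Worked trace (illustrative): XOR adds without carries, while
    # (first & second) << 1 re-injects the carries until none remain.
    #   add(5, 9): carry = 5 & 9 = 1;  partial = 5 ^ 9 = 12; next second = 2
    #              carry = 12 & 2 = 0; partial = 12 ^ 2 = 14 -> done
    # Note: with Python's unbounded ints this terminates only for
    # non-negative operands; negatives would need a fixed-width mask.
    assert add(5, 9) == 14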
297
1
'''simple docstring'''

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
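# A hypothetical usage sketch (the tool ships inside `transformers`, so the
# import path below is assumed; requires torch, Pillow and a network
# connection to fetch the BLIP checkpoint on first use):
#
#   from PIL import Image
#   from transformers.tools import ImageCaptioningTool
#
#   tool = ImageCaptioningTool()
#   print(tool(Image.open("photo.jpg")))  # e.g. "a dog sitting on a couch"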
297
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( _A , _A ): assert isinstance(_A , _A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase__ ( _A , _A , _A ): a : str = tmp_path / 'cache' a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : str = tmp_path / 'cache' a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Dict = features.copy() if features else default_expected_features a : Union[str, Any] = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : Tuple = tmp_path / 'cache' a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} a : Optional[int] = features.copy() if features else default_expected_features a : Dict = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowerCamelCase__ ( _A , _A ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} a : int = features.copy() a : List[Any] = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Dict = tmp_path / 'cache' a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read() 
_check_json_dataset(_A , _A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowerCamelCase__ ( _A , _A , _A ): if issubclass(_A , _A ): a : Optional[int] = jsonl_path elif issubclass(_A , _A ): a : Optional[int] = [jsonl_path] a : List[str] = tmp_path / 'cache' a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) def lowerCamelCase__ ( _A , _A , _A=("train",) ): assert isinstance(_A , _A ) for split in splits: a : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : List[Any] = features.copy() if features else default_expected_features a : Any = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase__ ( _A , _A , _A ): if split: a : Any = {split: jsonl_path} else: a : List[Any] = 'train' a : List[str] = {'train': jsonl_path, 'test': jsonl_path} a : List[Any] = tmp_path / 'cache' a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( _A ): return json.load(_A ) def lowerCamelCase__ ( _A ): return [json.loads(_A ) for line in buffer] class a__: @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write() buffer.seek(0 ) a : List[str] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, 
{'schema', 'data'}, 'data'), ] , ) def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write() buffer.seek(0 ) a : int = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write() buffer.seek(0 ) a : List[Any] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write() buffer.seek(0 ) a : int = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 def lowercase_ ( self : List[str] , __snake_case : str ): with pytest.raises(__snake_case ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 ) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ): a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}""" a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" ) JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write() with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f: a : Union[str, Any] = f.read() with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f: a : Union[str, Any] = f.read() assert exported_content == original_content
297
1
'''simple docstring''' # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def lowerCamelCase__ ( _A , _A , _A , _A ): a : Optional[int] = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, nicht wahr?', } # BLUE scores as follows: # "pair": [fairseq, transformers] a : Union[str, Any] = { 'wmt16-en-de-dist-12-1': [28.3, 27.52], 'wmt16-en-de-dist-6-1': [27.4, 27.11], 'wmt16-en-de-12-1': [26.9, 25.75], } a : Union[str, Any] = f"""{src_lang}-{tgt_lang}""" a : Dict = f""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"allenai/{model_name}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=_A , exist_ok=_A ) a : int = os.path.join(_A , 'README.md' ) print(f"""Generating {path}""" ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write(_A ) # make sure we are under the root of the project lowerCAmelCase: int = Path(__file__).resolve().parent.parent.parent lowerCAmelCase: List[str] = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: lowerCAmelCase: Optional[int] = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
297
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowerCamelCase__ ( _A = "laptop" ): a : Any = f"""https://www.amazon.in/laptop/s?k={product}""" a : Tuple = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5', } a : Any = BeautifulSoup(requests.get(_A , headers=_A ).text ) # Initialize a Pandas dataframe with the column titles a : Any = DataFrame( columns=[ 'Product Title', 'Product Link', 'Current Price of the product', 'Product Rating', 'MRP of the product', 'Discount', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( 'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ): try: a : Optional[int] = item.ha.text a : str = 'https://www.amazon.in/' + item.ha.a['href'] a : List[str] = item.find('span' , attrs={'class': 'a-offscreen'} ).text try: a : Optional[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text except AttributeError: a : Union[str, Any] = 'Not available' try: a : str = ( '₹' + item.find( 'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1] ) except AttributeError: a : int = '' try: a : Union[str, Any] = float( ( ( float(product_mrp.strip('₹' ).replace(',' , '' ) ) - float(product_price.strip('₹' ).replace(',' , '' ) ) ) / float(product_mrp.strip('₹' ).replace(',' , '' ) ) ) * 100 ) except ValueError: a : Any = float('nan' ) except AttributeError: pass a : Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] a : Any = ' ' a : List[str] = ' ' data_frame.index += 1 return data_frame if __name__ == "__main__": lowerCAmelCase: str = 'headphones' get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
297
1
'''simple docstring'''

from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome_traversal(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:  # base case: empty or single-character strings are palindromes
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
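
    # A small agreement check across the four variants on fresh inputs
    # (added for illustration, including even-length edge cases):
    for s in ("", "ab", "BB", "abca"):
        results = {
            is_palindrome(s),
            is_palindrome_traversal(s),
            is_palindrome_recursive(s),
            is_palindrome_slice(s),
        }
        assert len(results) == 1, s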
297
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = StableUnCLIPImgaImgPipeline lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowercase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase__ = frozenset([] ) def lowercase_ ( self : int ): a : Dict = 32 a : str = embedder_hidden_size # image encoding components a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) a : Dict = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case ) a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' ) torch.manual_seed(0 ) a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) torch.manual_seed(0 ) a : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) a : Union[str, Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , ) torch.manual_seed(0 ) a : List[Any] = DDIMScheduler( beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , ) torch.manual_seed(0 ) a : List[str] = AutoencoderKL() a : str = { # image encoding components 'feature_extractor': feature_extractor, 'image_encoder': image_encoder.eval(), # image noising components 'image_normalizer': image_normalizer.eval(), 'image_noising_scheduler': image_noising_scheduler, # regular denoising components 'tokenizer': tokenizer, 'text_encoder': text_encoder.eval(), 'unet': unet.eval(), 'scheduler': 
scheduler, 'vae': vae.eval(), } return components def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ): if str(__snake_case ).startswith('mps' ): a : Tuple = torch.manual_seed(__snake_case ) else: a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case ) if pil_image: a : Optional[Any] = input_image * 0.5 + 0.5 a : Optional[Any] = input_image.clamp(0 , 1 ) a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def lowercase_ ( self : Optional[Any] ): a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Union[str, Any] = self.get_dummy_components() a : Any = StableUnCLIPImgaImgPipeline(**__snake_case ) a : Tuple = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) a : Union[str, Any] = self.get_dummy_inputs(__snake_case ) inputs.update({'image_embeds': None} ) a : str = sd_pipe(**__snake_case ).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self : List[str] ): a : int = torch_device in ['cpu', 'mps'] self._test_attention_slicing_forward_pass(test_max_difference=__snake_case ) def lowercase_ ( self : int ): a : Optional[int] = torch_device in ['cpu', 'mps'] self._test_inference_batch_single_identical(test_max_difference=__snake_case ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowercase_ ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case ) @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Optional[Any] ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' ) a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Optional[int] ): a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Any = 
load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' ) a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Any ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) a : Optional[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = pipe( __snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
297
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class a__( unittest.TestCase ): lowercase__ = StableDiffusionLDMaDPipeline lowercase__ = TEXT_TO_IMAGE_PARAMS lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowercase__ = TEXT_TO_IMAGE_IMAGE_PARAMS def lowercase_ ( self : Union[str, Any] ): torch.manual_seed(0 ) a : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) a : Tuple = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) a : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) a : Any = CLIPTextModel(__snake_case ) a : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowercase_ ( self : Optional[Any] , __snake_case : Dict , __snake_case : int=0 ): if str(__snake_case ).startswith('mps' ): a : str = torch.manual_seed(__snake_case ) else: a : List[str] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : str = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowercase_ ( self : Union[str, Any] ): a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Dict = self.get_dummy_components() a : Dict = StableDiffusionLDMaDPipeline(**__snake_case ) a : Dict = ldmad_pipe.to(__snake_case ) ldmad_pipe.set_progress_bar_config(disable=__snake_case ) a : Dict = self.get_dummy_inputs(__snake_case ) a : List[Any] = ldmad_pipe(**__snake_case ) a , a : Any = output.rgb, output.depth a : List[Any] = rgb[0, -3:, -3:, -1] a : int = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) a : Optional[Any] = np.array( [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] ) a : Optional[Any] = np.array([103.46727, 85.812004, 87.849236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2 def lowercase_ ( self : Optional[int] ): a : Tuple = self.get_dummy_components() a : int = 
StableDiffusionLDMaDPipeline(**__snake_case ) a : List[Any] = ldmad_pipe.to(__snake_case ) ldmad_pipe.set_progress_bar_config(disable=__snake_case ) a : Union[str, Any] = self.get_dummy_inputs(__snake_case ) a : Tuple = 3 * [inputs['prompt']] # forward a : Union[str, Any] = ldmad_pipe(**__snake_case ) a , a : Optional[int] = output.rgb, output.depth a : Dict = rgb_slice_a[0, -3:, -3:, -1] a : int = depth_slice_a[0, -3:, -1] a : Optional[int] = self.get_dummy_inputs(__snake_case ) a : int = 3 * [inputs.pop('prompt' )] a : Optional[Any] = ldmad_pipe.tokenizer( __snake_case , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Tuple = text_inputs['input_ids'].to(__snake_case ) a : List[Any] = ldmad_pipe.text_encoder(__snake_case )[0] a : Optional[Any] = prompt_embeds # forward a : Dict = ldmad_pipe(**__snake_case ) a , a : Dict = output.rgb, output.depth a : Any = rgb_slice_a[0, -3:, -3:, -1] a : List[Any] = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4 def lowercase_ ( self : List[str] ): a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator a : int = self.get_dummy_components() a : List[Any] = PNDMScheduler(skip_prk_steps=__snake_case ) a : List[Any] = StableDiffusionLDMaDPipeline(**__snake_case ) a : List[Any] = ldmad_pipe.to(__snake_case ) ldmad_pipe.set_progress_bar_config(disable=__snake_case ) a : Any = self.get_dummy_inputs(__snake_case ) a : Tuple = 'french fries' a : Dict = ldmad_pipe(**__snake_case , negative_prompt=__snake_case ) a , a : str = output.rgb, output.depth a : Optional[Any] = rgb[0, -3:, -3:, -1] a : Dict = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) a : str = np.array( [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] ) a : str = np.array([107.84738, 84.62802, 89.962135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2 @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Optional[int] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[Any]="cpu" , __snake_case : Optional[Any]=torch.floataa , __snake_case : List[Any]=0 ): a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Union[str, Any] = np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) ) a : Tuple = torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case ) a : Dict = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowercase_ ( self : Optional[Any] ): a : List[str] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ) a : Any = ldmad_pipe.to(__snake_case ) ldmad_pipe.set_progress_bar_config(disable=__snake_case ) a : List[Any] = self.get_inputs(__snake_case ) a : Union[str, Any] = ldmad_pipe(**__snake_case ) a , a : Union[str, Any] = output.rgb, output.depth a : Any = rgb[0, -3:, -3:, -1].flatten() a : int = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12) a : Any = np.array( [0.53805465, 0.56707305, 
0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] ) a : List[str] = np.array( [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3 @nightly @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Any="cpu" , __snake_case : Optional[Any]=torch.floataa , __snake_case : Optional[Any]=0 ): a : int = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Tuple = np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) ) a : List[str] = torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case ) a : List[Any] = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowercase_ ( self : Optional[int] ): a : Tuple = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(__snake_case ) ldmad_pipe.set_progress_bar_config(disable=__snake_case ) a : str = self.get_inputs(__snake_case ) a : List[str] = ldmad_pipe(**__snake_case ) a , a : Dict = output.rgb, output.depth a : Union[str, Any] = 0.495586 a : Dict = 0.33795515 a : Optional[Any] = 112.48518 a : Union[str, Any] = 98.489746 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3 def lowercase_ ( self : str ): a : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(__snake_case ) ldmad_pipe.set_progress_bar_config(disable=__snake_case ) a : List[Any] = self.get_inputs(__snake_case ) a : Dict = ldmad_pipe(**__snake_case ) a , a : Any = output.rgb, output.depth a : List[Any] = 0.4194127 a : Optional[Any] = 0.35375586 a : Tuple = 0.5638502 a : Optional[int] = 0.34686103 assert rgb.shape == (1, 5_12, 5_12, 3) assert depth.shape == (1, 5_12, 5_12, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3
297
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase: List[str] = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class a__( lowerCamelCase__ ): lowercase__ = """t5""" lowercase__ = ["""past_key_values"""] lowercase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Union[str, Any] , __snake_case : int=3_21_28 , __snake_case : str=5_12 , __snake_case : Dict=64 , __snake_case : Optional[int]=20_48 , __snake_case : Tuple=6 , __snake_case : Any=None , __snake_case : Optional[int]=8 , __snake_case : str=32 , __snake_case : Union[str, Any]=1_28 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=1e-6 , __snake_case : int=1.0 , __snake_case : Optional[int]="relu" , __snake_case : Any=True , __snake_case : List[str]=True , __snake_case : Union[str, Any]=0 , __snake_case : Dict=1 , **__snake_case : Optional[int] , ): a : Optional[int] = vocab_size a : Dict = d_model a : Union[str, Any] = d_kv a : Dict = d_ff a : Tuple = num_layers a : Dict = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a : int = num_heads a : str = relative_attention_num_buckets a : List[Any] = relative_attention_max_distance a : int = dropout_rate a : Tuple = layer_norm_epsilon a : str = initializer_factor a : List[Any] = feed_forward_proj a : Union[str, Any] = use_cache a : List[str] = self.feed_forward_proj.split('-' ) a : int = act_info[-1] a : Union[str, Any] = act_info[0] == 'gated' if len(__snake_case ) > 1 and act_info[0] != "gated" or len(__snake_case ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a : Optional[Any] = 'gelu_new' super().__init__( pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , **__snake_case , ) class a__( lowerCamelCase__ ): @property def lowercase_ ( self : Optional[int] ): a : Dict = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: a : Dict = 'past_encoder_sequence + sequence' a : Dict = {0: 'batch'} a : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: a : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'} a : List[str] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction='inputs' ) return common_inputs @property def lowercase_ ( self : List[Any] ): return 13
297
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer lowerCAmelCase: Optional[int] = logging.get_logger(__name__) lowerCAmelCase: Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase: Optional[Any] = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } lowerCAmelCase: List[str] = { 'bert-base-uncased': 5_1_2, 'bert-large-uncased': 5_1_2, 'bert-base-cased': 5_1_2, 'bert-large-cased': 5_1_2, 'bert-base-multilingual-uncased': 5_1_2, 'bert-base-multilingual-cased': 5_1_2, 'bert-base-chinese': 5_1_2, 'bert-base-german-cased': 5_1_2, 'bert-large-uncased-whole-word-masking': 5_1_2, 'bert-large-cased-whole-word-masking': 5_1_2, 'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-base-cased-finetuned-mrpc': 5_1_2, 'bert-base-german-dbmdz-cased': 5_1_2, 'bert-base-german-dbmdz-uncased': 5_1_2, 'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2, 'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2, 'wietsedv/bert-base-dutch-cased': 5_1_2, } lowerCAmelCase: Optional[int] = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = BertTokenizer def __init__( self : Union[str, Any] , __snake_case : Any=None , __snake_case : Any=None , __snake_case : List[Any]=True , __snake_case : int="[UNK]" , __snake_case : Optional[int]="[SEP]" , __snake_case : List[str]="[PAD]" , __snake_case : List[Any]="[CLS]" , __snake_case : 
Dict="[MASK]" , __snake_case : int=True , __snake_case : List[str]=None , **__snake_case : Any , ): super().__init__( __snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , tokenize_chinese_chars=__snake_case , strip_accents=__snake_case , **__snake_case , ) a : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , __snake_case ) != do_lower_case or normalizer_state.get('strip_accents' , __snake_case ) != strip_accents or normalizer_state.get('handle_chinese_chars' , __snake_case ) != tokenize_chinese_chars ): a : str = getattr(__snake_case , normalizer_state.pop('type' ) ) a : List[str] = do_lower_case a : Tuple = strip_accents a : Optional[Any] = tokenize_chinese_chars a : int = normalizer_class(**__snake_case ) a : str = do_lower_case def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : str=None ): a : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase_ ( self : Tuple , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ): a : Dict = [self.sep_token_id] a : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase_ ( self : List[Any] , __snake_case : str , __snake_case : Optional[str] = None ): a : int = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case )
297
'''simple docstring'''
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    # Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative')
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative')
    else:
        return (
            'Resonant frequency',
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
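# A quick sanity check of f = 1 / (2 * pi * sqrt(L * C)); the values are
# illustrative, not from the original file:
#
#     label, freq = resonant_frequency(inductance=10, capacitance=5)
#     print(label, freq)  # -> 'Resonant frequency' ~0.0225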
297
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class a__( unittest.TestCase ): def lowercase_ ( self : str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase_ ( self : Optional[Any] ): a : List[Any] = 1 a : Any = 3 a : List[str] = (32, 32) a : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__snake_case ) return image @property def lowercase_ ( self : Union[str, Any] ): torch.manual_seed(0 ) a : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def lowercase_ ( self : Optional[Any] ): torch.manual_seed(0 ) a : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def lowercase_ ( self : int ): torch.manual_seed(0 ) a : Optional[Any] = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , ) return RobertaSeriesModelWithTransformation(__snake_case ) @property def lowercase_ ( self : Any ): def extract(*__snake_case : str , **__snake_case : Tuple ): class a__: def __init__( self : List[str] ): a : Any = torch.ones([0] ) def lowercase_ ( self : Dict , __snake_case : Tuple ): self.pixel_values.to(__snake_case ) return self return Out() return extract def lowercase_ ( self : List[str] ): a : int = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Union[str, Any] = self.dummy_cond_unet a : Optional[Any] = PNDMScheduler(skip_prk_steps=__snake_case ) a : Tuple = self.dummy_vae a : Any = self.dummy_text_encoder a : Optional[Any] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) a : Dict = 77 a : Tuple = self.dummy_image.to(__snake_case ) a : Dict = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk a : List[Any] = AltDiffusionImgaImgPipeline( unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , ) a : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case ) a : Any = alt_pipe.to(__snake_case ) alt_pipe.set_progress_bar_config(disable=__snake_case ) a : Union[str, Any] = 'A painting of a squirrel eating a burger' a : str = torch.Generator(device=__snake_case ).manual_seed(0 ) a : Dict = alt_pipe( [prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , 
image=__snake_case , ) a : Tuple = output.images a : Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(0 ) a : Optional[Any] = alt_pipe( [prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__snake_case , return_dict=__snake_case , )[0] a : Dict = image[0, -3:, -3:, -1] a : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) a : Any = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def lowercase_ ( self : int ): a : Optional[Any] = self.dummy_cond_unet a : int = PNDMScheduler(skip_prk_steps=__snake_case ) a : List[str] = self.dummy_vae a : Optional[int] = self.dummy_text_encoder a : int = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) a : List[Any] = 77 a : Optional[int] = self.dummy_image.to(__snake_case ) # put models in fp16 a : Optional[int] = unet.half() a : List[str] = vae.half() a : Tuple = bert.half() # make sure here that pndm scheduler skips prk a : Tuple = AltDiffusionImgaImgPipeline( unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , ) a : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case ) a : Tuple = alt_pipe.to(__snake_case ) alt_pipe.set_progress_bar_config(disable=__snake_case ) a : Union[str, Any] = 'A painting of a squirrel eating a burger' a : int = torch.manual_seed(0 ) a : Union[str, Any] = alt_pipe( [prompt] , generator=__snake_case , num_inference_steps=2 , output_type='np' , image=__snake_case , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def lowercase_ ( self : str ): a : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) # resize to resolution that is divisible by 8 but not 16 or 32 a : Optional[Any] = init_image.resize((7_60, 5_04) ) a : Any = 'BAAI/AltDiffusion' a : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained( __snake_case , safety_checker=__snake_case , ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() a : List[str] = 'A fantasy landscape, trending on artstation' a : str = torch.manual_seed(0 ) a : Dict = pipe( prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , generator=__snake_case , output_type='np' , ) a : Tuple = output.images[0] a : Tuple = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 7_60, 3) a : List[Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Dict ): a : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a : Any = init_image.resize((7_68, 5_12) ) a : List[str] = load_numpy( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' ) a : Union[str, Any] = 'BAAI/AltDiffusion' a : int = AltDiffusionImgaImgPipeline.from_pretrained( __snake_case , safety_checker=__snake_case , ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() a : Union[str, Any] = 'A fantasy landscape, trending on artstation' a : Optional[int] = torch.manual_seed(0 ) a : Tuple = pipe( prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , generator=__snake_case , output_type='np' , ) a : Tuple = output.images[0] assert image.shape == (5_12, 7_68, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1e-2
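# The slow tests above reduce to this inference recipe (CUDA and the
# 'BAAI/AltDiffusion' weights assumed; upstream the pipeline class is
# AltDiffusionImg2ImgPipeline, which the dump renames):
#
#     from diffusers import AltDiffusionImg2ImgPipeline
#     pipe = AltDiffusionImg2ImgPipeline.from_pretrained('BAAI/AltDiffusion').to('cuda')
#     out = pipe(prompt='A fantasy landscape, trending on artstation',
#                image=init_image,  # a PIL image supplied by the caller
#                strength=0.75, guidance_scale=7.5, output_type='np')
#     image = out.images[0]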
297
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase: Any = logging.get_logger(__name__) lowerCAmelCase: Any = {'vocab_file': 'vocab.txt'} lowerCAmelCase: List[Any] = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCAmelCase: str = { 'openbmb/cpm-ant-10b': 1_0_2_4, } def lowerCamelCase__ ( _A ): a : Union[str, Any] = collections.OrderedDict() with open(_A , 'r' , encoding='utf-8' ) as reader: a : int = reader.readlines() for index, token in enumerate(_A ): a : int = token.rstrip('\n' ) a : List[Any] = index return vocab class a__( lowerCamelCase__ ): def __init__( self : Dict , __snake_case : Any , __snake_case : Dict="<unk>" , __snake_case : str=2_00 ): a : List[Any] = vocab a : Any = unk_token a : List[str] = max_input_chars_per_word def lowercase_ ( self : Optional[int] , __snake_case : Union[str, Any] ): a : Optional[Any] = list(__snake_case ) if len(__snake_case ) > self.max_input_chars_per_word: return [self.unk_token] a : Any = 0 a : Optional[Any] = [] while start < len(__snake_case ): a : Optional[int] = len(__snake_case ) a : str = None while start < end: a : Optional[Any] = ''.join(chars[start:end] ) if substr in self.vocab: a : List[str] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__snake_case ) a : List[str] = end return sub_tokens class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["""input_ids""", """attention_mask"""] lowercase__ = False def __init__( self : Any , __snake_case : str , __snake_case : Tuple="<d>" , __snake_case : List[str]="</d>" , __snake_case : Dict="<s>" , __snake_case : List[Any]="</s>" , __snake_case : int="<pad>" , __snake_case : Any="<unk>" , __snake_case : List[str]="</n>" , __snake_case : int="</_>" , __snake_case : Optional[Any]="left" , **__snake_case : Dict , ): requires_backends(self , ['jieba'] ) super().__init__( bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , ) a : Union[str, Any] = bod_token a : Any = eod_token a : List[str] = load_vocab(__snake_case ) a : Optional[int] = self.encoder[space_token] a : str = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] a : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) a : Tuple = {v: k for k, v in self.encoder.items()} a : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowercase_ ( self : Optional[int] ): return self.encoder[self.bod_token] @property def lowercase_ ( self : Dict ): return self.encoder[self.eod_token] @property def lowercase_ ( self : Any ): return self.encoder["\n"] @property def lowercase_ ( self : Tuple ): return len(self.encoder ) def lowercase_ ( self : str ): return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ): a : List[str] = [] for x in jieba.cut(__snake_case , cut_all=__snake_case ): 
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) ) return output_tokens def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , **__snake_case : Optional[Any] ): a : Optional[int] = [i for i in token_ids if i >= 0] a : Any = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__snake_case , **__snake_case ) def lowercase_ ( self : Optional[int] , __snake_case : int ): return token in self.encoder def lowercase_ ( self : int , __snake_case : List[str] ): return "".join(__snake_case ) def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] ): return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) ) def lowercase_ ( self : Tuple , __snake_case : List[str] ): return self.decoder.get(__snake_case , self.unk_token ) def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ): if os.path.isdir(__snake_case ): a : Optional[int] = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: a : int = (filename_prefix + '-' if filename_prefix else '') + save_directory a : Any = 0 if " " in self.encoder: a : Union[str, Any] = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: a : Tuple = self.encoder['\n'] del self.encoder["\n"] a : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) with open(__snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' ) a : List[Any] = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def lowercase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : List[int] = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowercase_ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) if token_ids_a is not None: return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) return [1] + ([0] * len(__snake_case ))
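# Usage sketch for the CPM-Ant tokenizer above; the dump obfuscates the class
# name (upstream it is `CpmAntTokenizer`), and `jieba` must be installed:
#
#     tok = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
#     ids = tok.encode('今天天气真好')
#     text = tok.decode(ids)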
297
1
'''simple docstring''' import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() lowerCAmelCase: Optional[Any] = logging.get_logger(__name__) lowerCAmelCase: Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } lowerCAmelCase: Any = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def lowerCamelCase__ ( _A , _A , _A , _A , _A ): for attribute in key.split('.' ): a : List[Any] = getattr(_A , _A ) if weight_type is not None: a : int = getattr(_A , _A ).shape else: a : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a : Dict = value elif weight_type == "weight_g": a : Any = value elif weight_type == "weight_v": a : List[str] = value elif weight_type == "bias": a : int = value else: a : int = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCamelCase__ ( _A , _A ): a : Tuple = [] a : Dict = fairseq_model.state_dict() a : Optional[int] = hf_model.feature_extractor a : Tuple = hf_model.adapter for name, value in fairseq_dict.items(): a : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( _A , _A , _A , _A , hf_model.config.feat_extract_norm == 'group' , ) a : List[str] = True elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ): load_adapter(_A , _A , _A , _A ) a : Tuple = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: a : Union[str, Any] = True if "*" in mapped_key: a : List[Any] = name.split(_A )[0].split('.' )[-2] a : Optional[Any] = mapped_key.replace('*' , _A ) if "weight_g" in name: a : str = 'weight_g' elif "weight_v" in name: a : Optional[Any] = 'weight_v' elif "bias" in name: a : Optional[Any] = 'bias' elif "weight" in name: a : Union[str, Any] = 'weight' else: a : Dict = None set_recursively(_A , _A , _A , _A , _A ) continue if not is_used: unused_weights.append(_A ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowerCamelCase__ ( _A , _A , _A , _A , _A ): a : Optional[int] = full_name.split('conv_layers.' )[-1] a : str = name.split('.' 
) a : Tuple = int(items[0] ) a : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a : str = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) a : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_A ) def lowerCamelCase__ ( _A , _A , _A , _A ): a : List[str] = full_name.split('adaptor.' )[-1] a : Optional[Any] = name.split('.' ) if items[1].isdigit(): a : Optional[Any] = int(items[1] ) else: a : List[str] = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.""" a : Optional[int] = value logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.""" a : Optional[Any] = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.""" a : List[Any] = value logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.""" a : Dict = value logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" ) elif isinstance(_A , _A ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.""" a : List[str] = value logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.""" a : Union[str, 
Any] = value logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" ) else: unused_weights.append(_A ) def lowerCamelCase__ ( _A ): a , a : Tuple = emb.weight.shape a : int = nn.Linear(_A , _A , bias=_A ) a : Optional[int] = emb.weight.data return lin_layer @torch.no_grad() def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , ): a : Optional[int] = WavaVecaConfig.from_pretrained( _A , add_adapter=_A , adapter_stride=_A , adapter_kernel_size=_A , use_auth_token=_A , output_hidden_size=_A , ) a : Tuple = MBartConfig.from_pretrained(_A ) # load model a , a , a : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ 'config_yaml': config_yaml_path, 'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path, 'load_pretrained_decoder_from': None, } , ) a : Dict = model[0].eval() # load feature extractor a : List[Any] = WavaVecaFeatureExtractor.from_pretrained(_A , use_auth_token=_A ) # set weights for wav2vec2 encoder a : Union[str, Any] = WavaVecaModel(_A ) recursively_load_weights_wavaveca(model.encoder , _A ) # load decoder weights a : Union[str, Any] = MBartForCausalLM(_A ) a , a : List[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_A ) logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) a : Dict = SpeechEncoderDecoderModel(encoder=_A , decoder=_A ) a : Tuple = False a : Any = MBartaaTokenizer(_A ) tokenizer.save_pretrained(_A ) a : Dict = hf_wavavec.config.to_dict() a : Optional[int] = tokenizer.pad_token_id a : Any = tokenizer.bos_token_id a : Optional[Any] = tokenizer.eos_token_id a : List[Any] = 'mbart50' a : Union[str, Any] = 'wav2vec2' a : Optional[int] = tokenizer.eos_token_id a : List[Any] = 25_0004 a : List[str] = tokenizer.eos_token_id a : Optional[Any] = SpeechEncoderDecoderConfig.from_dict(_A ) hf_wavavec.save_pretrained(_A ) feature_extractor.save_pretrained(_A ) if __name__ == "__main__": lowerCAmelCase: List[Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-xls-r-1b', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/mbart-large-50-one-to-many-mmt', type=str, help='Path to hf decoder checkpoint config', ) parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers') parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers') parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers') parser.add_argument('--encoder_output_dim', default=1_0_2_4, type=int, help='encoder output dim') parser.add_argument('--start_token_id', default=2_5_0_0_0_4, type=int, help='`decoder_start_token_id` of model config') lowerCAmelCase: Optional[int] = parser.parse_args() convert_wavaveca_checkpoint( 
args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
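# Example invocation of the conversion script above; every flag is defined in
# its argparse block, while the script filename and paths are placeholders:
#
#     python convert_wav2vec2_mbart50_checkpoint.py \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --dict_path /path/to/dict.txt \
#         --config_yaml_path /path/to/config.yaml \
#         --pytorch_dump_folder_path ./converted-model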
297
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class a__( unittest.TestCase ): @slow def lowercase_ ( self : List[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[int] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Any = TFAutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) 
self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Any ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[int] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : str = AutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , 
__snake_case ) a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def lowercase_ ( self : Tuple ): a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) def lowercase_ ( self : Any ): a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
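# The pattern these cross-framework tests exercise, in isolation (requires
# `transformers` with both TensorFlow and PyTorch installed):
#
#     from transformers import AutoModel, TFAutoModel
#     tf_model = TFAutoModel.from_pretrained('bert-base-uncased', from_pt=True)
#     pt_model = AutoModel.from_pretrained('bert-base-uncased', from_tf=True)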
297
1
'''simple docstring'''
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color='blue',
            label='Curve of Degree ' + str(self.degree),
        )
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
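# Evaluating single points on the curve without plotting:
#
#     curve = BezierCurve([(1, 1), (1, 2)])
#     print(curve.bezier_curve_function(0))  # -> (1.0, 1.0)
#     print(curve.bezier_curve_function(1))  # -> (1.0, 2.0)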
297
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}


class RobertaConfig(PretrainedConfig):
    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
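# Instantiating the config above with a couple of non-default values
# (a minimal sketch):
#
#     config = RobertaConfig(vocab_size=50265, num_hidden_layers=6)
#     print(config.hidden_size)  # 768, the signature default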
297
1
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
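# Counting islands (8-connected groups of 1s) in a small grid:
#
#     grid = [[1, 1, 0],
#             [0, 0, 0],
#             [0, 0, 1]]
#     print(Graph(3, 3, grid).count_islands())  # -> 2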
297
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
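# The loop halves [a, b] until the interval is narrower than 0.01, so the
# positive root of 10 - x*x comes back with about two-decimal accuracy:
#
#     print(round(bisection(0, 6), 1))  # -> 3.2  (sqrt(10) ~ 3.1623)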
297
1
'''simple docstring'''
import enum
import shutil
import sys


TERMINAL_WIDTH, TERMINAL_HEIGHT = shutil.get_terminal_size()

CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite('\r')


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
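# A small demonstration of the cursor helpers above:
#
#     forceWrite('Loading...', end='')
#     move_cursor(1, 'up')     # emits the ANSI escape '\033[1A'
#     clear_line()             # blank the current line, cursor back to column 0
#     writeColor('done', 32)   # green text via '\u001b[32m...\u001b[0m'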
297
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class a__: def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ): a : Tuple = parent a : List[str] = batch_size a : Optional[Any] = seq_length a : Tuple = is_training a : Optional[Any] = use_input_mask a : List[Any] = use_token_type_ids a : List[Any] = use_labels a : int = vocab_size a : Union[str, Any] = hidden_size a : Any = num_hidden_layers a : List[str] = num_attention_heads a : int = intermediate_size a : str = hidden_act a : Tuple = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : List[str] = max_position_embeddings a : Any = type_vocab_size a : List[str] = type_sequence_label_size a : Union[str, Any] = initializer_range a : Optional[int] = num_labels a : Optional[Any] = num_choices a : Optional[int] = scope def lowercase_ ( self : List[Any] ): a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a : Dict = None if self.use_input_mask: a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a : Optional[Any] = None a : Optional[int] = None a : Dict = None if self.use_labels: a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) a : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : List[Any] ): a : Any = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ): a : Tuple = EsmForProteinFolding(config=__snake_case ).float() model.to(__snake_case ) model.eval() a : Dict = model(__snake_case , attention_mask=__snake_case ) a : Union[str, Any] = 
model(__snake_case ) a : List[Any] = model(__snake_case ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def lowercase_ ( self : Optional[Any] ): a : Tuple = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) : Optional[Any] = config_and_inputs a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = False lowercase__ = (EsmForProteinFolding,) if is_torch_available() else () lowercase__ = () lowercase__ = {} if is_torch_available() else {} lowercase__ = False def lowercase_ ( self : int ): a : Tuple = EsmFoldModelTester(self ) a : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def lowercase_ ( self : List[str] ): self.config_tester.run_common_tests() def lowercase_ ( self : Union[str, Any] ): a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) @unittest.skip('Does not support attention outputs' ) def lowercase_ ( self : str ): pass @unittest.skip def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support passing input embeds!' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold only has one output format.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def lowercase_ ( self : Tuple ): pass @unittest.skip('ESMFold does not support input chunking.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase_ ( self : Union[str, Any] ): pass @require_torch class a__( lowerCamelCase__ ): @slow def lowercase_ ( self : Optional[int] ): a : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() a : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) a : Any = model(__snake_case )['positions'] a : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) )
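# The slow test above amounts to this folding recipe (large checkpoint; the
# raw token ids are copied from the test rather than produced by a tokenizer):
#
#     import torch
#     from transformers import EsmForProteinFolding
#     model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float().eval()
#     ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
#     with torch.no_grad():
#         positions = model(ids)['positions']  # shape (8, batch, seq_len, 14, 3)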
297
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__( lowerCamelCase__ , unittest.TestCase ): lowercase__ = ShapEPipeline lowercase__ = ["""prompt"""] lowercase__ = ["""prompt"""] lowercase__ = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowercase__ = False @property def lowercase_ ( self : Optional[Any] ): return 32 @property def lowercase_ ( self : List[str] ): return 32 @property def lowercase_ ( self : List[Any] ): return self.time_input_dim * 4 @property def lowercase_ ( self : str ): return 8 @property def lowercase_ ( self : List[str] ): a : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def lowercase_ ( self : List[str] ): torch.manual_seed(0 ) a : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(__snake_case ) @property def lowercase_ ( self : int ): torch.manual_seed(0 ) a : Dict = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } a : Optional[Any] = PriorTransformer(**__snake_case ) return model @property def lowercase_ ( self : int ): torch.manual_seed(0 ) a : str = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } a : int = ShapERenderer(**__snake_case ) return model def lowercase_ ( self : str ): a : Optional[int] = self.dummy_prior a : int = self.dummy_text_encoder a : Optional[int] = self.dummy_tokenizer a : List[str] = self.dummy_renderer a : Dict = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=__snake_case , clip_sample=__snake_case , clip_sample_range=1.0 , ) a : Optional[int] = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Optional[int]=0 ): if str(__snake_case ).startswith('mps' ): a : Dict = torch.manual_seed(__snake_case ) else: a : Tuple = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Tuple = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def lowercase_ ( self : List[Any] ): a : Tuple = 'cpu' a : 
str = self.get_dummy_components() a : str = self.pipeline_class(**__snake_case ) a : List[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) a : Any = pipe(**self.get_dummy_inputs(__snake_case ) ) a : Any = output.images[0] a : Any = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) a : Optional[Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : str ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase_ ( self : Optional[int] ): a : Dict = torch_device == 'cpu' a : Tuple = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__snake_case , relax_max_difference=__snake_case , ) def lowercase_ ( self : List[Any] ): a : Dict = self.get_dummy_components() a : int = self.pipeline_class(**__snake_case ) a : Union[str, Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) a : List[str] = 1 a : Union[str, Any] = 2 a : str = self.get_dummy_inputs(__snake_case ) for key in inputs.keys(): if key in self.batch_params: a : Optional[int] = batch_size * [inputs[key]] a : Any = pipe(**__snake_case , num_images_per_prompt=__snake_case )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Any ): a : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) a : Any = ShapEPipeline.from_pretrained('openai/shap-e' ) a : int = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) a : Dict = torch.Generator(device=__snake_case ).manual_seed(0 ) a : Dict = pipe( 'a shark' , generator=__snake_case , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__snake_case , __snake_case )
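# Text-to-3D inference as exercised by the slow test above (GPU assumed):
#
#     import torch
#     from diffusers import ShapEPipeline
#     pipe = ShapEPipeline.from_pretrained('openai/shap-e').to('cuda')
#     gen = torch.Generator(device='cuda').manual_seed(0)
#     frames = pipe('a shark', generator=gen, guidance_scale=15.0,
#                   num_inference_steps=64, frame_size=64,
#                   output_type='np').images[0]  # (20, 64, 64, 3)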
297
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                hidden_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
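# How a pipeline could retarget the dual transformer above at inference time;
# the surrounding UNet wiring and channel sizes are assumptions, not prescribed
# by the module itself:
#
#     dual = DualTransformer2DModel(in_channels=320, cross_attention_dim=768)
#     dual.mix_ratio = 0.7                 # blend the two branch outputs 70/30
#     dual.condition_lengths = [77, 257]   # e.g. CLIP text tokens vs. image tokens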
297
1
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowerCamelCase__ ( _A ): # vision encoder if "img_encoder.pos_embed" in name: a : Tuple = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: a : Optional[Any] = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: a : Union[str, Any] = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: a : List[Any] = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: a : Tuple = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: a : List[Any] = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: a : List[Any] = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: a : int = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: a : str = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: a : Optional[Any] = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: a : Union[str, Any] = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: a : List[str] = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: a : int = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: a : Dict = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: a : List[str] = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: a : int = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: a : Union[str, Any] = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: a : Optional[Any] = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: a : Optional[int] = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: a : int = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: a : int = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: a : Union[str, Any] = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: a : Optional[Any] = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: a : Tuple = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def lowerCamelCase__ ( _A , _A ): for key in orig_state_dict.copy().keys(): a : Dict = orig_state_dict.pop(_A ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors a : str = key.split('.' 
) a , a : Dict = int(key_split[2] ), int(key_split[4] ) a : Optional[int] = config.vision_config.hidden_size if "weight" in key: a : Any = val[:dim, :] a : List[Any] = val[dim : dim * 2, :] a : Tuple = val[-dim:, :] else: a : int = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : Optional[Any] = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors a : List[Any] = key.split('.' ) a : Tuple = int(key_split[3] ) a : str = config.text_config.hidden_size if "weight" in key: a : Union[str, Any] = val[:dim, :] a : List[str] = val[ dim : dim * 2, : ] a : List[Any] = val[-dim:, :] else: a : Dict = val[:dim] a : int = val[dim : dim * 2] a : Optional[int] = val[-dim:] else: a : List[str] = rename_key(_A ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): a : str = val.squeeze_() else: a : Dict = val return orig_state_dict def lowerCamelCase__ ( ): a : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' a : Tuple = Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( _A , _A , _A="groupvit-gcc-yfcc" , _A=False ): a : List[str] = GroupViTConfig() a : Any = GroupViTModel(_A ).eval() a : List[Any] = torch.load(_A , map_location='cpu' )['model'] a : Optional[Any] = convert_state_dict(_A , _A ) a , a : Union[str, Any] = model.load_state_dict(_A , strict=_A ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_A ) == 0) # verify result a : List[str] = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) a : str = prepare_img() a : Tuple = processor(text=['a photo of a cat', 'a photo of a dog'] , images=_A , padding=_A , return_tensors='pt' ) with torch.no_grad(): a : Dict = model(**_A ) if model_name == "groupvit-gcc-yfcc": a : str = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": a : int = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image , _A , atol=1E-3 ) processor.save_pretrained(_A ) model.save_pretrained(_A ) print('Successfully saved processor and model to' , _A ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(_A , organization='nielsr' ) model.push_to_hub(_A , organization='nielsr' ) if __name__ == "__main__": lowerCAmelCase: List[str] = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) lowerCAmelCase: int = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
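The qkv handling in the conversion script above boils down to slicing a fused projection matrix into query, key and value blocks. A minimal sketch with a made-up hidden size (real checkpoints use config.vision_config.hidden_size):

import torch

dim = 4  # hypothetical hidden size
qkv_weight = torch.randn(3 * dim, dim)   # fused projection as stored in the original checkpoint
qkv_bias = torch.randn(3 * dim)

# Same slicing as convert_state_dict: rows 0..dim are Q, dim..2*dim are K, the rest V.
q_w, k_w, v_w = qkv_weight[:dim, :], qkv_weight[dim : 2 * dim, :], qkv_weight[-dim:, :]
q_b, k_b, v_b = qkv_bias[:dim], qkv_bias[dim : 2 * dim], qkv_bias[-dim:]

assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)
assert q_b.shape == k_b.shape == v_b.shape == (dim,)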
297
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase: Union[str, Any] = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: List[Any] = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Any = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase: Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
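The _LazyModule wiring above defers heavy imports until an attribute is first used. A self-contained sketch of the same idea using only the standard library (PEP 562 module-level __getattr__); the name-to-module map is invented for illustration:

import importlib
import sys

_LAZY = {"sqrt": "math", "dumps": "json"}  # hypothetical name -> providing module

def __getattr__(name):  # called only when `name` is missing from the module dict
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(name)

this_module = sys.modules[__name__]
print(this_module.sqrt(16.0))  # the providing module is imported only here -> 4.0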
297
1
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class a__( lowerCamelCase__ ): def __init__( self : Optional[Any] , __snake_case : List[Any] ): a : Dict = data def __iter__( self : Tuple ): for element in self.data: yield element def lowerCamelCase__ ( _A=True ): a : str = Accelerator(even_batches=_A ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def lowerCamelCase__ ( _A , _A , _A , _A = False ): if iterable: a : Dict = DummyIterableDataset(torch.as_tensor(range(_A ) ) ) else: a : Any = TensorDataset(torch.as_tensor(range(_A ) ) ) a : Any = DataLoader(_A , batch_size=_A ) a : List[str] = accelerator.prepare(_A ) return dl def lowerCamelCase__ ( _A , _A , _A , _A , _A , ): a : Dict = create_dataloader(accelerator=_A , dataset_size=_A , batch_size=_A ) a : Dict = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def lowerCamelCase__ ( ): a : List[Any] = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( _A , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( _A , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def lowerCamelCase__ ( ): a : Dict = create_accelerator(even_batches=_A ) verify_dataloader_batch_sizes( _A , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( _A , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def lowerCamelCase__ ( ): a : Optional[int] = create_accelerator(even_batches=_A ) a : Any = torch.nn.Linear(1 , 1 ) a : Optional[Any] = accelerator.prepare(_A ) a : Dict = create_dataloader(_A , dataset_size=3 , batch_size=1 ) a : List[str] = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(_A ): a : List[Any] = ddp_model(batch[0].float() ) a : Tuple = output.sum() loss.backward() batch_idxs.append(_A ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def lowerCamelCase__ ( _A ): with warnings.catch_warnings(record=_A ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , _A ) assert "only 
supported for multi-GPU" in str(w[-1].message ) def lowerCamelCase__ ( ): a : Optional[int] = True a : Tuple = False a : Optional[int] = create_accelerator(even_batches=_A ) a : int = torch.nn.Linear(1 , 1 ) a : Optional[int] = accelerator.prepare(_A ) a : str = create_dataloader(_A , dataset_size=3 , batch_size=1 ) a : Dict = create_dataloader(_A , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ): a : Tuple = train_dl.batch_sampler.even_batches a : Optional[int] = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def lowerCamelCase__ ( ): a : Optional[Any] = True a : Tuple = False a : Tuple = create_accelerator(even_batches=_A ) a : int = torch.nn.Linear(1 , 1 ) a : Tuple = accelerator.prepare(_A ) create_dataloader(_A , dataset_size=3 , batch_size=1 , iterable=_A ) a : str = create_dataloader(_A , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings('ignore' ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ): a : int = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def lowerCamelCase__ ( ): a : Union[str, Any] = create_accelerator() a : int = torch.nn.Linear(1 , 1 ) a : str = accelerator.prepare(_A ) create_dataloader(_A , dataset_size=3 , batch_size=1 , iterable=_A ) with warnings.catch_warnings(record=_A ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ): pass assert issubclass(w[-1].category , _A ) assert "only supported for map-style datasets" in str(w[-1].message ) def lowerCamelCase__ ( ): a : Optional[Any] = create_accelerator() accelerator.print('Test that even_batches variable ensures uniform batches across processes' ) test_default_ensures_even_batch_sizes() accelerator.print('Run tests with even_batches disabled' ) test_can_disable_even_batches() accelerator.print('Test joining uneven inputs' ) test_can_join_uneven_inputs() accelerator.print('Test overriding even_batches when joining uneven inputs' ) test_join_can_override_even_batches() accelerator.print('Test overriding even_batches for mixed dataloader types' ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print('Test join with non DDP distributed raises warning' ) a : List[Any] = accelerator.state.distributed_type a : str = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(_A ) a : List[str] = original_state if __name__ == "__main__": main()
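A rough pure-Python model of the batch-size expectations the asserts above encode; this is not Accelerate's real sampler, just arithmetic that reproduces the test's expected values for two processes.

import math

def per_process_batch_sizes(dataset_size, batch_size, num_processes, even_batches):
    if even_batches:
        # Pad by repeating samples until the dataset divides evenly across processes.
        total = math.ceil(dataset_size / num_processes) * num_processes
    else:
        total = dataset_size
    per_proc = [total // num_processes + (1 if rank < total % num_processes else 0)
                for rank in range(num_processes)]
    return [[min(batch_size, n - i) for i in range(0, n, batch_size)] for n in per_proc]

print(per_process_batch_sizes(7, 2, 2, even_batches=True))   # [[2, 2], [2, 2]]
print(per_process_batch_sizes(7, 2, 2, even_batches=False))  # [[2, 2], [2, 1]]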
297
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase: str = { 'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'], 'processing_mgp_str': ['MgpstrProcessor'], 'tokenization_mgp_str': ['MgpstrTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Optional[Any] = [ 'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST', 'MgpstrModel', 'MgpstrPreTrainedModel', 'MgpstrForSceneTextRecognition', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCAmelCase: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
1
'''simple docstring''' from __future__ import annotations def lowerCamelCase__ ( _A ): a : str = [True] * limit a : Optional[int] = False a : List[str] = False a : Any = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): a : int = i * 2 while index < limit: a : Union[str, Any] = False a : Union[str, Any] = index + i a : Dict = [2] for i in range(3 , _A , 2 ): if is_prime[i]: primes.append(_A ) return primes def lowerCamelCase__ ( _A = 100_0000 ): a : List[str] = prime_sieve(_A ) a : str = 0 a : Union[str, Any] = 0 for i in range(len(_A ) ): for j in range(i + length , len(_A ) ): a : Optional[int] = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: a : Optional[Any] = j - i a : Any = sol return largest if __name__ == "__main__": print(F"{solution() = }")
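A quick sanity check of the consecutive-prime-sum search above on a small ceiling: below 100 the longest run of consecutive primes summing to a prime is 2+3+5+7+11+13 = 41, six terms. The explicit prime list replaces the sieve to keep the sketch short.

primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
best_len, best_sum = 0, 0
for i in range(len(primes)):
    # Start j at i + best_len: shorter runs can never beat the current best.
    for j in range(i + best_len, len(primes)):
        s = sum(primes[i:j])
        if s >= 100:
            break
        if s in primes and j - i > best_len:
            best_len, best_sum = j - i, s
print(best_len, best_sum)  # 6 41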
297
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase: Dict = logging.get_logger(__name__) lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase: List[Any] = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } lowerCAmelCase: str = { 'allenai/led-base-16384': 1_6_3_8_4, } class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Dict="replace" , __snake_case : int="<s>" , __snake_case : Any="</s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : int="<mask>" , __snake_case : int=False , __snake_case : str=True , **__snake_case : Tuple , ): super().__init__( __snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , ) a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : List[Any] = getattr(__snake_case , pre_tok_state.pop('type' ) ) a : Optional[Any] = add_prefix_space a : Optional[Any] = pre_tok_class(**__snake_case ) a : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a : Dict = 'post_processor' a : int = getattr(self.backend_tokenizer , __snake_case , __snake_case ) if tokenizer_component_instance: a : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a : Any = tuple(state['sep'] ) if "cls" in state: a : Any = tuple(state['cls'] ) a : Optional[Any] = False if state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : Any = add_prefix_space a : Optional[Any] = True if state.get('trim_offsets' , __snake_case ) != trim_offsets: a : List[Any] = trim_offsets a : Union[str, Any] = True if changes_to_apply: a : int = getattr(__snake_case , state.pop('type' ) ) a : List[Any] = component_class(**__snake_case ) setattr(self.backend_tokenizer , __snake_case , __snake_case ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def lowercase_ ( self : Dict ): if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self : Dict , __snake_case : List[str] ): a : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value a : Optional[int] = value def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ): a : Dict = kwargs.get('is_split_into_words' , __snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*__snake_case , **__snake_case ) def lowercase_ ( self : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : List[str] ): a : Optional[int] = kwargs.get('is_split_into_words' , __snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._encode_plus(*__snake_case , **__snake_case ) def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ): a : Union[str, Any] = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : int=None ): a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ): a : int = [self.sep_token_id] a : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ): a : Optional[Any] = super()._pad( encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , ) # Load from model defaults if return_attention_mask is None: a : str = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a : Any = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. a : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case ) if needs_to_be_padded: a : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a : Dict = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": a : Union[str, Any] = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
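The _pad override above extends global_attention_mask with -1 so it tracks the padded sequence length. A stand-alone sketch with invented token ids:

# -1 marks padding positions; 0 already means "local attention", so it cannot be reused.
input_ids = [101, 2023, 2003, 102]
padded_input_ids = input_ids + [0] * 3          # tokenizer padded to length 7
global_attention_mask = [1, 0, 0, 0]            # global attention on the first token only

difference = len(padded_input_ids) - len(global_attention_mask)
global_attention_mask = global_attention_mask + [-1] * difference  # padding_side == "right"
assert len(global_attention_mask) == len(padded_input_ids)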
297
1
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase__ ( _A , _A ): assert isinstance(_A , _A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase__ ( _A , _A , _A ): a : str = tmp_path / 'cache' a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : str = tmp_path / 'cache' a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Dict = features.copy() if features else default_expected_features a : Union[str, Any] = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : Tuple = tmp_path / 'cache' a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} a : Optional[int] = features.copy() if features else default_expected_features a : Dict = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def lowerCamelCase__ ( _A , _A ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} a : int = features.copy() a : List[Any] = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : Dict = tmp_path / 'cache' a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read() 
_check_json_dataset(_A , _A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def lowerCamelCase__ ( _A , _A , _A ): if issubclass(_A , _A ): a : Optional[int] = jsonl_path elif issubclass(_A , _A ): a : Optional[int] = [jsonl_path] a : List[str] = tmp_path / 'cache' a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) def lowerCamelCase__ ( _A , _A , _A=("train",) ): assert isinstance(_A , _A ) for split in splits: a : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def lowerCamelCase__ ( _A , _A , _A ): a : Dict = tmp_path / 'cache' a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : List[Any] = features.copy() if features else default_expected_features a : Any = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def lowerCamelCase__ ( _A , _A , _A ): if split: a : Any = {split: jsonl_path} else: a : List[Any] = 'train' a : List[str] = {'train': jsonl_path, 'test': jsonl_path} a : List[Any] = tmp_path / 'cache' a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase__ ( _A ): return json.load(_A ) def lowerCamelCase__ ( _A ): return [json.loads(_A ) for line in buffer] class a__: @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write() buffer.seek(0 ) a : List[str] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, 
{'schema', 'data'}, 'data'), ] , ) def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write() buffer.seek(0 ) a : int = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write() buffer.seek(0 ) a : List[Any] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write() buffer.seek(0 ) a : int = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 def lowercase_ ( self : List[str] , __snake_case : str ): with pytest.raises(__snake_case ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 ) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ): a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}""" a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" ) JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write() with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f: a : Union[str, Any] = f.read() with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f: a : Union[str, Any] = f.read() assert exported_content == original_content
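The writer tests above round-trip JSON Lines through an in-memory buffer. The same pattern with only the standard library, using made-up rows matching the test's column names:

import io
import json

rows = [{"col_1": "a", "col_2": 1, "col_3": 1.0}, {"col_1": "b", "col_2": 2, "col_3": 2.0}]

# Write one JSON object per line, then parse it back the way load_json_lines does.
buffer = io.BytesIO()
for row in rows:
    buffer.write((json.dumps(row) + "\n").encode("utf-8"))
buffer.seek(0)
round_tripped = [json.loads(line) for line in buffer]
assert round_tripped == rows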
297
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a__: def __init__( self : Tuple ): a : Optional[int] = '' a : Optional[Any] = '' a : str = [] a : int = 0 a : str = 2_56 a : Union[str, Any] = 0 a : Any = 0 a : Optional[int] = 0 a : List[str] = 0 def lowercase_ ( self : str , __snake_case : str ): a : Any = cva.imread(__snake_case , 0 ) a : Optional[Any] = copy.deepcopy(self.img ) a , a , a : int = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' ) a : Optional[int] = np.sum(__snake_case ) for i in range(len(__snake_case ) ): a : Optional[Any] = x[i] / self.k self.sk += prk a : str = (self.L - 1) * self.sk if self.rem != 0: a : Optional[int] = int(last % last ) a : int = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__snake_case ) a : str = int(np.ma.count(self.img ) / self.img[1].size ) a : Optional[int] = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a : Any = self.img[j][i] if num != self.last_list[num]: a : str = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def lowercase_ ( self : Dict ): plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def lowercase_ ( self : List[Any] ): cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCAmelCase: Optional[Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg') lowerCAmelCase: Tuple = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
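The stretch() method above builds a cumulative grey-level mapping, i.e. histogram equalization: s_k = (L - 1) * sum of p(r_i) for i <= k, rounded to the nearest level. A vectorized numpy sketch of that mapping on a hypothetical image:

import numpy as np

L = 256
img = np.random.randint(0, L, size=(8, 8))            # hypothetical 8x8 grey image
hist = np.bincount(img.ravel(), minlength=L)
cdf = np.cumsum(hist / hist.sum())                    # running sum of p(r_i)
mapping = np.rint((L - 1) * cdf).astype(int)          # analogue of last_list
equalized = mapping[img]                              # remap every pixel
assert equalized.min() >= 0 and equalized.max() <= L - 1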
297
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax""", """transformers"""] def __init__( self : str , *__snake_case : List[str] , **__snake_case : Optional[Any] ): requires_backends(self , ['flax', 'transformers'] ) @classmethod def lowercase_ ( cls : int , *__snake_case : Union[str, Any] , **__snake_case : Any ): requires_backends(cls , ['flax', 'transformers'] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *__snake_case : Dict , **__snake_case : str ): requires_backends(cls , ['flax', 'transformers'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax""", """transformers"""] def __init__( self : str , *__snake_case : List[str] , **__snake_case : List[str] ): requires_backends(self , ['flax', 'transformers'] ) @classmethod def lowercase_ ( cls : Optional[Any] , *__snake_case : Optional[int] , **__snake_case : Optional[int] ): requires_backends(cls , ['flax', 'transformers'] ) @classmethod def lowercase_ ( cls : int , *__snake_case : Dict , **__snake_case : int ): requires_backends(cls , ['flax', 'transformers'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax""", """transformers"""] def __init__( self : int , *__snake_case : str , **__snake_case : Tuple ): requires_backends(self , ['flax', 'transformers'] ) @classmethod def lowercase_ ( cls : List[str] , *__snake_case : int , **__snake_case : Tuple ): requires_backends(cls , ['flax', 'transformers'] ) @classmethod def lowercase_ ( cls : List[str] , *__snake_case : Tuple , **__snake_case : Optional[Any] ): requires_backends(cls , ['flax', 'transformers'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax""", """transformers"""] def __init__( self : Dict , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ): requires_backends(self , ['flax', 'transformers'] ) @classmethod def lowercase_ ( cls : Optional[int] , *__snake_case : str , **__snake_case : Any ): requires_backends(cls , ['flax', 'transformers'] ) @classmethod def lowercase_ ( cls : List[str] , *__snake_case : int , **__snake_case : Tuple ): requires_backends(cls , ['flax', 'transformers'] )
297
'''simple docstring''' import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class a__: def __init__( self : List[Any] , __snake_case : Union[str, Any] ): if isinstance(__snake_case , __snake_case ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden a : str = deepcopy(__snake_case ) elif os.path.exists(__snake_case ): with io.open(__snake_case , 'r' , encoding='utf-8' ) as f: a : Optional[Any] = json.load(__snake_case ) else: try: a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' ) a : Union[str, Any] = json.loads(__snake_case ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) a : List[str] = config self.set_stage_and_offload() def lowercase_ ( self : List[str] ): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. a : Dict = self.get_value('zero_optimization.stage' , -1 ) # offload a : str = False if self.is_zeroa() or self.is_zeroa(): a : Union[str, Any] = set(['cpu', 'nvme'] ) a : Optional[Any] = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: a : List[str] = True def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ): a : str = self.config # find the config node of interest if it exists a : List[str] = ds_key_long.split('.' ) a : Dict = nodes.pop() for node in nodes: a : List[Any] = config.get(__snake_case ) if config is None: return None, ds_key return config, ds_key def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ): a , a : List[Any] = self.find_config_node(__snake_case ) if config is None: return default return config.get(__snake_case , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ): a : Optional[Any] = self.config # find the config node of interest if it exists a : List[str] = ds_key_long.split('.' 
) for node in nodes: a : str = config a : Dict = config.get(__snake_case ) if config is None: if must_exist: raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ): a : Union[str, Any] = self.get_value(__snake_case ) return False if value is None else bool(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : str ): a : Optional[Any] = self.get_value(__snake_case ) return False if value is None else not bool(__snake_case ) def lowercase_ ( self : Optional[Any] ): return self._stage == 2 def lowercase_ ( self : Union[str, Any] ): return self._stage == 3 def lowercase_ ( self : str ): return self._offload class a__: def __init__( self : Tuple , __snake_case : str ): a : Optional[Any] = engine def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ): # runs backpropagation and handles mixed precision self.engine.backward(__snake_case , **__snake_case ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : List[str] ): super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case ) a : Optional[Any] = hasattr(self.optimizer , 'overflow' ) def lowercase_ ( self : Dict , __snake_case : Dict=None ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def lowercase_ ( self : Optional[Any] ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def lowercase_ ( self : Tuple ): if self.__has_overflow__: return self.optimizer.overflow return False class a__( lowerCamelCase__ ): def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ): super().__init__(__snake_case , __snake_case ) def lowercase_ ( self : Any ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class a__: def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ): a : Optional[Any] = params a : str = lr a : List[str] = weight_decay a : str = kwargs class a__: def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ): a : Union[str, Any] = optimizer a : Any = total_num_steps a : List[str] = warmup_num_steps a : int = kwargs
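The dotted-key lookup in the config wrapper above, reduced to a single standard-library function (the sample config values are invented):

def get_value(config, ds_key_long, default=None):
    # Same traversal as find_config_node: walk the dotted path, fall back on a miss.
    *nodes, leaf = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)

cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(cfg, "zero_optimization.stage") == 3
assert get_value(cfg, "zero_optimization.offload_optimizer.device", "none") == "none"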
297
1
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration lowerCAmelCase: Optional[int] = pytest.mark.integration lowerCAmelCase: Any = {'comet'} lowerCAmelCase: List[str] = importlib.util.find_spec('fairseq') is not None lowerCAmelCase: List[str] = {'code_eval'} lowerCAmelCase: Optional[Any] = os.name == 'nt' lowerCAmelCase: Any = {'bertscore', 'frugalscore', 'perplexity'} lowerCAmelCase: int = importlib.util.find_spec('transformers') is not None def lowerCamelCase__ ( _A ): @wraps(_A ) def wrapper(self , _A ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _A ) return wrapper def lowerCamelCase__ ( _A ): @wraps(_A ) def wrapper(self , _A ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _A ) return wrapper def lowerCamelCase__ ( _A ): @wraps(_A ) def wrapper(self , _A ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _A ) return wrapper def lowerCamelCase__ ( ): a : Optional[int] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) @local class a__( parameterized.TestCase ): lowercase__ = {} lowercase__ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def lowercase_ ( self : Union[str, Any] , __snake_case : Union[str, Any] ): a : List[Any] = '[...]' a : Union[str, Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __snake_case ) ).module_path ) a : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__snake_case ) # check parameters a : Optional[int] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__snake_case , metric_module.__name__ ): with self.use_local_metrics(): try: a : Optional[int] = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def lowercase_ ( self : Any , __snake_case : Dict ): a : str = '[...]' a : List[str] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __snake_case ) ).module_path ) # run doctest with self.use_local_metrics(): a : Optional[int] = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def lowercase_ ( self : Any , __snake_case : str , __snake_case : Any ): if metric_name in 
self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__snake_case ): yield else: yield @contextmanager def lowercase_ ( self : Optional[int] ): def load_local_metric(__snake_case : Dict , *__snake_case : str , **__snake_case : Optional[int] ): return load_metric(os.path.join('metrics' , __snake_case ) , *__snake_case , **__snake_case ) with patch('datasets.load_metric' ) as mock_load_metric: a : Any = load_local_metric yield @classmethod def lowercase_ ( cls : str , __snake_case : int ): def wrapper(__snake_case : int ): a : str = contextmanager(__snake_case ) a : Tuple = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase__ ( _A ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class a__( lowerCamelCase__ ): def lowercase_ ( self : Optional[int] , __snake_case : int ): assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: a : Optional[int] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase__ ( _A ): import torch def bert_cos_score_idf(_A , _A , *_A , **_A ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_A ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: a : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase__ ( _A ): def load_from_checkpoint(_A ): class a__: def lowercase_ ( self : List[str] , __snake_case : List[str] , *__snake_case : Optional[Any] , **__snake_case : Optional[Any] ): assert len(__snake_case ) == 2 a : int = [0.19, 0.92] return scores, sum(__snake_case ) / len(__snake_case ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: a : Any = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: a : Union[str, Any] = load_from_checkpoint yield def lowerCamelCase__ ( ): a : List[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) ) a : int = 'ERROR' a : Any = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_A , match=re.escape(_A ) ): metric.compute(predictions=[] , references=[] , scheme=_A )
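The patcher classes above all follow one pattern: swap an expensive callable for a stub inside a context manager so doctests stay fast, then restore it on exit. A self-contained miniature (the function being patched is a stand-in for a model forward pass):

import math
from unittest.mock import patch

def expensive_score(x):
    return math.sqrt(x)  # stand-in for a heavyweight model call

with patch(f"{__name__}.expensive_score") as mocked:
    mocked.return_value = 1.0
    assert expensive_score(42) == 1.0     # the stub answers instead of the real function

assert expensive_score(4.0) == 2.0        # original behaviour restored outside the context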
297
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowerCAmelCase: int = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class a__( unittest.TestCase ): def lowercase_ ( self : int , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): a : Optional[int] = None a : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) a : List[str] = os.path.abspath('examples' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: a : int = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='main()' if parser_only else 'training_function()' , ): a : List[Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) a : Union[str, Any] = '\n'.join(__snake_case ) if special_strings is not None: for string in special_strings: a : Union[str, Any] = diff.replace(__snake_case , '' ) self.assertEqual(__snake_case , '' ) def lowercase_ ( self : Optional[Any] ): self.one_complete_example('complete_nlp_example.py' , __snake_case ) self.one_complete_example('complete_nlp_example.py' , __snake_case ) def lowercase_ ( self : Any ): a : Dict = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) a : int = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class a__( lowerCamelCase__ ): lowercase__ = False @classmethod def lowercase_ ( cls : Optional[int] ): super().setUpClass() a : List[str] = tempfile.mkdtemp() a : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) a : Optional[int] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def lowercase_ ( cls : Optional[int] ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def lowercase_ ( self : Tuple ): a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def lowercase_ ( self : Dict ): a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 
--output_dir {self.tmpdir} """.split() a : int = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def lowercase_ ( self : Any ): a : Tuple = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} """.split() a : int = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) def lowercase_ ( self : int ): a : Optional[int] = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} """.split() a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): a : Any = torch.cuda.device_count() else: a : str = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) else: self.assertIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) @slow def lowercase_ ( self : Tuple ): a : Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): a : Any = run_command(self._launch_args + testargs , return_stdout=__snake_case ) a : Optional[Any] = re.findall('({.+})' , __snake_case ) a : str = [r for r in results if 'accuracy' in r][-1] a : str = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def lowercase_ ( self : Optional[int] ): a : int = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def lowercase_ ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: a : Optional[Any] = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , 'tracking' ) ) ) def lowercase_ ( self : List[str] ): a : Optional[Any] = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def lowercase_ ( self : int ): a : Optional[Any] = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
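The cross-validation test above scrapes metrics out of captured stdout with a regex plus ast.literal_eval. The same extraction in isolation, on an invented stdout string:

import ast
import re

stdout = "epoch 0: {'accuracy': 0.62}\nepoch 1: {'accuracy': 0.81, 'f1': 0.79}\n"
results = re.findall(r"({.+})", stdout)                     # dict-looking blobs, per line
last_with_accuracy = [r for r in results if "accuracy" in r][-1]
metrics = ast.literal_eval(last_with_accuracy)              # safe literal parsing, no eval()
assert metrics["accuracy"] >= 0.75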
297
1
'''simple docstring'''

import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class a__( unittest.TestCase ):
    def lowercase_ ( self : List[Any] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            a : Any = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=__snake_case , cache_dir=__snake_case )
            a : str = [t[-1] for t in os.walk(os.path.join(__snake_case , os.listdir(__snake_case )[0] , 'snapshots' ) )]
            a : str = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin' ) for f in files )


@slow
@require_flax
class a__( unittest.TestCase ):
    def lowercase_ ( self : Optional[int] ):
        a , a : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=__snake_case )

        a : List[Any] = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        a : List[str] = jax.random.PRNGKey(0 )
        a : Optional[int] = 4

        a : Tuple = jax.device_count()
        a : Tuple = num_samples * [prompt]

        a : Optional[Any] = pipeline.prepare_inputs(__snake_case )

        # shard inputs and rng
        a : Optional[Any] = replicate(__snake_case )
        a : List[Any] = jax.random.split(__snake_case , __snake_case )
        a : Tuple = shard(__snake_case )

        a : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
            assert np.abs(np.abs(__snake_case , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1

        a : Tuple = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(__snake_case ) == num_samples

    def lowercase_ ( self : List[str] ):
        a , a : Any = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=__snake_case )

        a : Optional[int] = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        a : Any = jax.random.PRNGKey(0 )
        a : Tuple = 50

        a : str = jax.device_count()
        a : Dict = num_samples * [prompt]

        a : int = pipeline.prepare_inputs(__snake_case )

        # shard inputs and rng
        a : Dict = replicate(__snake_case )
        a : Tuple = jax.random.split(__snake_case , __snake_case )
        a : int = shard(__snake_case )

        a : Any = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images

        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
            assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1

    def lowercase_ ( self : Dict ):
        a , a : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__snake_case )

        a : Tuple = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        a : Optional[int] = jax.random.PRNGKey(0 )
        a : Optional[int] = 50

        a : str = jax.device_count()
        a : Any = num_samples * [prompt]

        a : Any = pipeline.prepare_inputs(__snake_case )

        # shard inputs and rng
        a : Any = replicate(__snake_case )
        a : int = jax.random.split(__snake_case , __snake_case )
        a : Tuple = shard(__snake_case )

        a : Any = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images

        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1

    def lowercase_ ( self : Union[str, Any] ):
        a , a : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )

        a : Tuple = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        a : List[str] = jax.random.PRNGKey(0 )
        a : int = 50

        a : Dict = jax.device_count()
        a : str = num_samples * [prompt]

        a : Union[str, Any] = pipeline.prepare_inputs(__snake_case )

        # shard inputs and rng
        a : Dict = replicate(__snake_case )
        a : List[Any] = jax.random.split(__snake_case , __snake_case )
        a : str = shard(__snake_case )

        a : Union[str, Any] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images

        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1

    def lowercase_ ( self : str ):
        a : List[str] = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=__snake_case , steps_offset=1 , )
        a , a : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=__snake_case , safety_checker=__snake_case , )
        a : List[str] = scheduler.create_state()

        a : Optional[Any] = scheduler_state

        a : List[str] = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        a : str = jax.random.PRNGKey(0 )
        a : Optional[Any] = 50

        a : Union[str, Any] = jax.device_count()
        a : Union[str, Any] = num_samples * [prompt]

        a : Tuple = pipeline.prepare_inputs(__snake_case )

        # shard inputs and rng
        a : Any = replicate(__snake_case )
        a : Union[str, Any] = jax.random.split(__snake_case , __snake_case )
        a : List[Any] = shard(__snake_case )

        a : Dict = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images

        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
            assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1

    def lowercase_ ( self : Any ):
        a : Optional[int] = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )

        a : Optional[Any] = jax.device_count()
        a : Any = num_samples * [prompt]
        a : int = jax.random.split(jax.random.PRNGKey(0 ) , __snake_case )

        a , a : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__snake_case , )

        a : Optional[int] = replicate(__snake_case )
        a : Dict = pipeline.prepare_inputs(__snake_case )
        a : Tuple = shard(__snake_case )

        a : List[str] = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)

        a : Union[str, Any] = images[2, 0, 2_56, 10:17, 1]

        # With memory efficient attention
        a , a : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=__snake_case , use_memory_efficient_attention=__snake_case , )

        a : str = replicate(__snake_case )
        a : List[Any] = pipeline.prepare_inputs(__snake_case )
        a : Union[str, Any] = shard(__snake_case )

        a : List[Any] = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
        assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)

        a : Optional[int] = images[2, 0, 2_56, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
297
'''simple docstring'''

import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']


class a__( lowerCamelCase__ ):
    def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ):
        a : Union[str, Any] = tokenizer
        a : Union[str, Any] = dataset
        a : Any = len(__snake_case ) if n_tasks is None else n_tasks
        a : List[str] = n_copies

    def __iter__( self : str ):
        a : List[Any] = []
        for task in range(self.n_tasks ):
            # without the strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
        a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class a__( lowerCamelCase__ ):
    def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ):
        a : Dict = start_length
        a : Dict = eof_strings
        a : str = tokenizer

    def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ):
        a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        a : Optional[int] = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(__snake_case )


def lowerCamelCase__ ( _A ):
    a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A )
    # last string should be ""
    return "".join(string_list[:-2] )


def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ):
    a : Optional[Any] = defaultdict(_A )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(_A ) ):
        with torch.no_grad():
            a : Optional[Any] = batch['ids'].shape[-1]
            a : Optional[Any] = accelerator.unwrap_model(_A ).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A )
            # each task is generated batch_size times
            a : Tuple = batch['task_id'].repeat(_A )
            a : List[Any] = accelerator.pad_across_processes(
                _A , dim=1 , pad_index=tokenizer.pad_token_id )

            a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
            a : List[str] = generated_tokens.cpu().numpy()
            a : int = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(_A , _A ):
                gen_token_dict[task].append(_A )

    a : Any = [[] for _ in range(_A )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
            code_gens[task].append(remove_last_block(_A ) )
    return code_gens


def lowerCamelCase__ ( ):
    # Setup configuration
    a : Dict = HfArgumentParser(_A )
    a : Any = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    a : List[Any] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    a : int = 'false'

    if args.num_workers is None:
        a : Dict = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    a : List[Any] = Accelerator()
    set_seed(args.seed , device_specific=_A )

    # Load model and tokenizer
    a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
    a : str = tokenizer.eos_token
    a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )

    # Generation settings
    a : Optional[Any] = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
    }

    # Load evaluation dataset and metric
    a : Optional[int] = load_dataset('openai_humaneval' )
    a : Optional[Any] = load_metric('code_eval' )

    a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
    a : Optional[Any] = args.n_samples // args.batch_size

    a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    a : int = DataLoader(_A , batch_size=1 )

    # Run a quick test to see if code evaluation is enabled
    try:
        a : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.' )
        raise exception

    a , a : int = accelerator.prepare(_A , _A )

    a : int = complete_code(
        _A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )

    if accelerator.is_main_process:
        a : List[str] = []

        for task in tqdm(range(_A ) ):
            a : int = human_eval['test'][task]['test']
            a : int = f"""check({human_eval["test"][task]["entry_point"]})"""
            references.append('\n' + test_func + '\n' + entry_point )

        # Evaluate completions with "code_eval" metric
        a , a : Tuple = code_eval_metric.compute(
            references=_A , predictions=_A , num_workers=args.num_workers )
        print(f"""Results: {pass_at_k}""" )

        # Save results to json file
        with open(args.output_file , 'w' ) as fp:
            json.dump(_A , _A )


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
297
1
'''simple docstring'''

import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download

from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging


logging.set_verbosity_info()
lowerCAmelCase: List[Any] = logging.get_logger(__name__)

set_seed(7_7_0)

lowerCAmelCase: Optional[Any] = {
    'c_attn': 'att_proj',
    'c_proj': 'out_proj',
    'c_fc': 'in_proj',
    'transformer.': '',
    'h.': 'layers.',
    'ln_1': 'layernorm_1',
    'ln_2': 'layernorm_2',
    'ln_f': 'layernorm_final',
    'wpe': 'position_embeds_layer',
    'wte': 'input_embeds_layer',
}

lowerCAmelCase: Any = {
    'text_small': {
        'repo_id': 'suno/bark',
        'file_name': 'text.pt',
    },
    'coarse_small': {
        'repo_id': 'suno/bark',
        'file_name': 'coarse.pt',
    },
    'fine_small': {
        'repo_id': 'suno/bark',
        'file_name': 'fine.pt',
    },
    'text': {
        'repo_id': 'suno/bark',
        'file_name': 'text_2.pt',
    },
    'coarse': {
        'repo_id': 'suno/bark',
        'file_name': 'coarse_2.pt',
    },
    'fine': {
        'repo_id': 'suno/bark',
        'file_name': 'fine_2.pt',
    },
}

lowerCAmelCase: Dict = os.path.dirname(os.path.abspath(__file__))
lowerCAmelCase: Dict = os.path.join(os.path.expanduser('~'), '.cache')
lowerCAmelCase: str = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')


def lowerCamelCase__ ( _A , _A=False ):
    a : Union[str, Any] = model_type
    if use_small:
        key += "_small"
    return os.path.join(_A , REMOTE_MODEL_PATHS[key]['file_name'] )


def lowerCamelCase__ ( _A , _A ):
    os.makedirs(_A , exist_ok=_A )
    hf_hub_download(repo_id=_A , filename=_A , local_dir=_A )


def lowerCamelCase__ ( _A , _A , _A=False , _A="text" ):
    if model_type == "text":
        a : List[str] = BarkSemanticModel
        a : List[Any] = BarkSemanticConfig
        a : Any = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        a : Optional[int] = BarkCoarseModel
        a : Dict = BarkCoarseConfig
        a : Optional[int] = BarkCoarseGenerationConfig
    elif model_type == "fine":
        a : int = BarkFineModel
        a : str = BarkFineConfig
        a : Union[str, Any] = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    a : List[Any] = f"""{model_type}_small""" if use_small else model_type
    a : Optional[int] = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(_A ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info['repo_id'] , model_info['file_name'] )
    a : Any = torch.load(_A , map_location=_A )
    # this is a hack
    a : Tuple = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        a : Tuple = model_args['vocab_size']
        a : Optional[int] = model_args['vocab_size']
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    a : Dict = model_args.pop('n_head' )
    a : Tuple = model_args.pop('n_embd' )
    a : Optional[int] = model_args.pop('n_layer' )

    a : Optional[Any] = ConfigClass(**checkpoint['model_args'] )
    a : Any = ModelClass(config=_A )
    a : Tuple = GenerationConfigClass()

    a : Optional[int] = model_generation_config
    a : List[Any] = checkpoint['model']

    # fixup checkpoint
    a : Union[str, Any] = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(_A ):
            # replace part of the key with corresponding layer name in HF implementation
            a : int = k[len(_A ) :]
            for old_layer_name in new_layer_name_dict:
                a : Union[str, Any] = new_k.replace(_A , new_layer_name_dict[old_layer_name] )
            a : Union[str, Any] = state_dict.pop(_A )

    a : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
    a : Tuple = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    a : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() )
    a : Tuple = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(_A ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(_A ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(_A , strict=_A )

    a : Dict = model.num_parameters(exclude_embeddings=_A )
    a : Optional[int] = checkpoint['best_val_loss'].item()
    logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(_A , 3 )} loss""" )
    model.eval()
    model.to(_A )
    del checkpoint, state_dict

    return model


def lowerCamelCase__ ( _A , _A=False , _A="text" ):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    a : Union[str, Any] = 'cpu'  # do conversion on cpu

    a : int = _get_ckpt_path(_A , use_small=_A )
    a : Optional[Any] = _load_model(_A , _A , model_type=_A , use_small=_A )

    # load bark initial model
    a : List[str] = _bark_load_model(_A , 'cpu' , model_type=_A , use_small=_A )

    if model_type == "text":
        a : List[str] = bark_model['model']

    if model.num_parameters(exclude_embeddings=_A ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )

    # check if same output as the bark model
    a : Optional[int] = 5
    a : Tuple = 10

    if model_type in ["text", "coarse"]:
        a : Optional[int] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        a : Optional[int] = bark_model(_A )[0]

        a : str = model(_A )

        # take last logits
        a : Optional[int] = output_new_model_total.logits[:, [-1], :]
    else:
        a : Tuple = 3
        a : Optional[int] = 8
        a : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )

        a : Dict = model(_A , _A )
        a : Tuple = bark_model(_A , _A )

        a : Optional[Any] = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError('initial and new outputs are not equal' )

    Path(_A ).mkdir(exist_ok=_A )
    model.save_pretrained(_A )


def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A , ):
    a : Optional[int] = os.path.join(_A , _A )
    a : int = BarkSemanticConfig.from_pretrained(os.path.join(_A , 'config.json' ) )
    a : str = BarkCoarseConfig.from_pretrained(os.path.join(_A , 'config.json' ) )
    a : List[str] = BarkFineConfig.from_pretrained(os.path.join(_A , 'config.json' ) )
    a : Optional[int] = EncodecConfig.from_pretrained('facebook/encodec_24khz' )

    a : Dict = BarkSemanticModel.from_pretrained(_A )
    a : Optional[int] = BarkCoarseModel.from_pretrained(_A )
    a : Optional[Any] = BarkFineModel.from_pretrained(_A )
    a : int = EncodecModel.from_pretrained('facebook/encodec_24khz' )

    a : Optional[Any] = BarkConfig.from_sub_model_configs(
        _A , _A , _A , _A )

    a : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )

    a : Optional[int] = BarkModel(_A )

    a : Optional[int] = semantic
    a : Dict = coarseAcoustic
    a : Tuple = fineAcoustic
    a : int = codec
    a : int = bark_generation_config

    Path(_A ).mkdir(exist_ok=_A )
    bark.save_pretrained(_A , repo_id=_A , push_to_hub=_A )


if __name__ == "__main__":
    lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('model_type', type=str, help='text, coarse or fine.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')

    lowerCAmelCase: Union[str, Any] = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
297
'''simple docstring'''

import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms

from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


def lowerCamelCase__ ( _A , _A , _A ):
    if isinstance(_A , torch.Tensor ):
        return image
    elif isinstance(_A , PIL.Image.Image ):
        a : Any = [image]

    if isinstance(image[0] , PIL.Image.Image ):
        a : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        a : int = np.concatenate(_A , axis=0 )
        a : int = np.array(_A ).astype(np.floataa ) / 255.0
        a : str = image.transpose(0 , 3 , 1 , 2 )
        a : str = 2.0 * image - 1.0
        a : Optional[int] = torch.from_numpy(_A )
    elif isinstance(image[0] , torch.Tensor ):
        a : Optional[Any] = torch.cat(_A , dim=0 )
    return image


def lowerCamelCase__ ( _A , _A , _A , _A=0.9995 ):
    if not isinstance(_A , np.ndarray ):
        a : Dict = True
        a : Optional[Any] = va.device
        a : Optional[int] = va.cpu().numpy()
        a : Union[str, Any] = va.cpu().numpy()

    a : Any = np.sum(va * va / (np.linalg.norm(_A ) * np.linalg.norm(_A )) )
    if np.abs(_A ) > DOT_THRESHOLD:
        a : Any = (1 - t) * va + t * va
    else:
        a : Any = np.arccos(_A )
        a : Tuple = np.sin(_A )
        a : Optional[Any] = theta_a * t
        a : List[Any] = np.sin(_A )
        a : Dict = np.sin(theta_a - theta_t ) / sin_theta_a
        a : int = sin_theta_t / sin_theta_a
        a : Any = sa * va + sa * va

    if inputs_are_torch:
        a : Dict = torch.from_numpy(_A ).to(_A )

    return va


def lowerCamelCase__ ( _A , _A ):
    a : Optional[int] = F.normalize(_A , dim=-1 )
    a : str = F.normalize(_A , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )


def lowerCamelCase__ ( _A , _A ):
    for param in model.parameters():
        a : int = value


class a__( lowerCamelCase__ ):
    def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ):
        super().__init__()
        self.register_modules(
            vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , )
        a : Optional[Any] = (
            feature_extractor.size
            if isinstance(feature_extractor.size , __snake_case )
            else feature_extractor.size['shortest_edge']
        )
        a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , __snake_case )
        set_requires_grad(self.clip_model , __snake_case )

    def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            a : Union[str, Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__snake_case )

    def lowercase_ ( self : Union[str, Any] ):
        self.enable_attention_slicing(__snake_case )

    def lowercase_ ( self : Optional[Any] ):
        set_requires_grad(self.vae , __snake_case )

    def lowercase_ ( self : Tuple ):
        set_requires_grad(self.vae , __snake_case )

    def lowercase_ ( self : int ):
        set_requires_grad(self.unet , __snake_case )

    def lowercase_ ( self : Union[str, Any] ):
        set_requires_grad(self.unet , __snake_case )

    def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ):
        # get the original timestep using init_timestep
        a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case )
        a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
        a : List[Any] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ):
        if not isinstance(__snake_case , torch.Tensor ):
            raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" )

        a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case )

        if isinstance(__snake_case , __snake_case ):
            a : Optional[int] = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case )
            ]
            a : Optional[Any] = torch.cat(__snake_case , dim=0 )
        else:
            a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case )

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        a : List[str] = 0.18215 * init_latents
        a : str = init_latents.repeat_interleave(__snake_case , dim=0 )

        a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case )

        # get latents
        a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case )
        a : int = init_latents

        return latents

    def lowercase_ ( self : List[str] , __snake_case : Dict ):
        a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )

    def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ):
        a : List[Any] = self.feature_extractor.preprocess(__snake_case )
        a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
        a : int = self.clip_model.get_image_features(__snake_case )
        a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
        a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 )
        return image_embeddings_clip

    @torch.enable_grad()
    def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ):
        a : Optional[Any] = latents.detach().requires_grad_()

        a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case )

        # predict the noise residual
        a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample

        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            a : int = self.scheduler.alphas_cumprod[timestep]
            a : Any = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            a : Tuple = torch.sqrt(__snake_case )
            a : str = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , __snake_case ):
            a : List[Any] = self.scheduler.sigmas[index]
            a : Optional[int] = latents - sigma * noise_pred
        else:
            raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        a : Union[str, Any] = 1 / 0.18215 * sample
        a : str = self.vae.decode(__snake_case ).sample
        a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )

        a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case )
        a : List[str] = self.normalize(__snake_case ).to(latents.dtype )

        a : List[str] = self.clip_model.get_image_features(__snake_case )
        a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )

        a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale

        a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0]

        if isinstance(self.scheduler , __snake_case ):
            a : List[Any] = latents.detach() + grads * (sigma**2)
            a : Optional[int] = noise_pred_original
        else:
            a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ):
        if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
            raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )

        if isinstance(__snake_case , torch.Generator ) and batch_size > 1:
            a : Dict = [generator] + [None] * (batch_size - 1)

        a : Any = [
            ('model', self.coca_model is None),
            ('tokenizer', self.coca_tokenizer is None),
            ('transform', self.coca_transform is None),
        ]
        a : List[str] = [x[0] for x in coca_is_none if x[1]]
        a : List[str] = ', '.join(__snake_case )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(__snake_case ):
                raise ValueError(
                    F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
                    F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
            a : int = self.get_image_description(__snake_case )
        if style_prompt is None:
            if len(__snake_case ):
                raise ValueError(
                    F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
                    F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
            a : Union[str, Any] = self.get_image_description(__snake_case )

        # get prompt text embeddings for content and style
        a : Optional[Any] = self.tokenizer(
            __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
        a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]

        a : Dict = self.tokenizer(
            __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
        a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]

        a : Any = slerp(__snake_case , __snake_case , __snake_case )

        # duplicate text embeddings for each generation per prompt
        a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 )

        # set timesteps
        a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        a : Any = {}
        if accepts_offset:
            a : Optional[Any] = 1

        self.scheduler.set_timesteps(__snake_case , **__snake_case )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )

        a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device )
        a : Optional[int] = timesteps[:1].repeat(__snake_case )

        # Preprocess image
        a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case )
        a : List[Any] = self.prepare_latents(
            __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )

        a : str = preprocess(__snake_case , __snake_case , __snake_case )
        a : Union[str, Any] = self.prepare_latents(
            __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )

        a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case )

        if clip_guidance_scale > 0:
            a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case )
            a : int = self.get_clip_image_embeddings(__snake_case , __snake_case )
            a : List[str] = slerp(
                __snake_case , __snake_case , __snake_case )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        a : int = guidance_scale > 1.0

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            a : Any = content_text_input.input_ids.shape[-1]
            a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' )
            a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            a : Any = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        a : List[str] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(
                    self.device )
            else:
                a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            a : List[str] = latents.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        a : Any = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        a : Union[str, Any] = {}
        if accepts_eta:
            a : List[str] = eta

        # check if the scheduler accepts generator
        a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            a : Any = generator

        with self.progress_bar(total=__snake_case ):
            for i, t in enumerate(__snake_case ):
                # expand the latents if we are doing classifier free guidance
                a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case )

                # predict the noise residual
                a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    a , a : List[str] = noise_pred.chunk(2 )
                    a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    a : Optional[Any] = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    a , a : Union[str, Any] = self.cond_fn(
                        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )

                # compute the previous noisy sample x_t -> x_t-1
                a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        a : Tuple = 1 / 0.18215 * latents
        a : Optional[int] = self.vae.decode(__snake_case ).sample

        a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
        a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            a : str = self.numpy_to_pil(__snake_case )

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
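# The pipeline above mixes the content and style embeddings with spherical interpolation
# (slerp). Below is a minimal, self-contained sketch of that operation with independent,
# illustrative names; the upstream helper additionally falls back to linear interpolation
# when the inputs are nearly parallel (the DOT_THRESHOLD branch above).
import numpy as np


def slerp_sketch(t: float, v0: np.ndarray, v1: np.ndarray) -> np.ndarray:
    # angle between the two vectors
    theta = np.arccos(np.dot(v0, v1) / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    # sine weights keep the interpolant on the arc between v0 and v1
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)


if __name__ == "__main__":
    a = np.array([1.0, 0.0])
    b = np.array([0.0, 1.0])
    mid = slerp_sketch(0.5, a, b)
    assert np.allclose(np.linalg.norm(mid), 1.0)  # unlike lerp, slerp stays on the unit sphere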
297
1
'''simple docstring'''


def lowerCamelCase__ ( number ):
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
297
'''simple docstring'''


def hubble_parameter(
    hubble_constant ,
    redshift ,
    radiation_density ,
    matter_density ,
    dark_energy ,
):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )

    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
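# For reference, the expression implemented above is the standard Friedmann expansion rate
#
#     H(z) = H_0 * sqrt( Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda )
#
# with Omega_k = 1 - (Omega_r + Omega_m + Omega_Lambda), which corresponds to the
# `curvature` term and the `e_a ** (1 / 2)` factor in the function.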
297
1
'''simple docstring'''


def lowerCamelCase__ ( a ):
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(a , float ):
        raise TypeError('Input value must be a \'int\' type' )
    return bin(a ).count('1' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
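# A standalone check of the identity the function relies on (values are illustrative):
# Python's bin(n).count('1') returns the population count of a non-negative integer.
assert bin(25).count('1') == 3  # 25 == 0b11001 -> three set bits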
297
'''simple docstring'''

from __future__ import annotations

import math


class a__:
    def __init__( self : List[str] , __snake_case : int ):
        a : str = size
        # approximate the overall size of segment tree with given value
        a : Optional[int] = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        a : Any = [0 for i in range(0 , 4 * size )]
        a : Dict = [0 for i in range(0 , 4 * size )]  # flag for lazy update

    def lowercase_ ( self : int , __snake_case : int ):
        return idx * 2

    def lowercase_ ( self : Dict , __snake_case : int ):
        return idx * 2 + 1

    def lowercase_ ( self : Dict , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] ):
        if left_element == right_element:
            a : Tuple = a[left_element - 1]
        else:
            a : Tuple = (left_element + right_element) // 2
            self.build(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case )
            self.build(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case )
            a : Union[str, Any] = max(
                self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] )

    def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ):
        if self.flag[idx] is True:
            a : int = self.lazy[idx]
            a : Union[str, Any] = False
            if left_element != right_element:
                a : Dict = self.lazy[idx]
                a : int = self.lazy[idx]
                a : Tuple = True
                a : Optional[Any] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            a : int = val
            if left_element != right_element:
                a : int = val
                a : Dict = val
                a : List[str] = True
                a : List[str] = True
            return True
        a : Tuple = (left_element + right_element) // 2
        self.update(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
        self.update(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case , __snake_case )
        a : Optional[int] = max(
            self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] )
        return True

    def lowercase_ ( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ):
        if self.flag[idx] is True:
            a : str = self.lazy[idx]
            a : Optional[Any] = False
            if left_element != right_element:
                a : Dict = self.lazy[idx]
                a : Union[str, Any] = self.lazy[idx]
                a : Dict = True
                a : int = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        a : Dict = (left_element + right_element) // 2
        a : Optional[int] = self.query(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case )
        a : Union[str, Any] = self.query(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case )
        return max(__snake_case , __snake_case )

    def __str__( self : Any ):
        return str([self.query(1 , 1 , self.size , __snake_case , __snake_case ) for i in range(1 , self.size + 1 )] )


if __name__ == "__main__":
    lowerCAmelCase: Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
    lowerCAmelCase: int = 1_5
    lowerCAmelCase: Optional[int] = SegmentTree(size)

    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 1_1))
    print(segt.query(1, 1, size, 7, 1_2))
    segt.update(1, 1, size, 1, 3, 1_1_1)
    print(segt.query(1, 1, size, 1, 1_5))
    segt.update(1, 1, size, 7, 8, 2_3_5)
    print(segt)
297
1
'''simple docstring'''

import os

import numpy
import onnx


def lowerCamelCase__ ( _A , _A ):
    a : Any = a.name
    a : Tuple = b.name
    a : List[Any] = ''
    a : Union[str, Any] = ''
    a : Tuple = a == b
    a : List[Any] = name_a
    a : List[Any] = name_b
    return res


def lowerCamelCase__ ( _A , _A , _A ):
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(_A , _A )
            node_proto.input.pop(i + 1 )

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
        _graph_replace_input_with(node_proto.attribute[1].g , _A , _A )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , _A , _A )


def lowerCamelCase__ ( _A , _A , _A ):
    for n in graph_proto.node:
        _node_replace_input_with(_A , _A , _A )


def lowerCamelCase__ ( _A , _A , _A ):
    a : Union[str, Any] = list(model.graph.initializer )
    a : int = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        a : int = inits[i].name
        a : List[Any] = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , _A , _A )


def lowerCamelCase__ ( _A ):
    a : List[str] = os.path.dirname(_A )
    a : Dict = os.path.basename(_A )

    a : List[str] = onnx.load(os.path.join(_A , _A ) )
    a : Union[str, Any] = list(model.graph.initializer )

    a : Optional[Any] = set()
    a : Optional[Any] = {}
    a : Optional[Any] = []
    a : Dict = 0

    for i in range(len(_A ) ):
        if i in dup_set:
            continue

        for j in range(i + 1 , len(_A ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(_A )
                dup_set.add(_A )

                a : Any = inits[j].data_type
                a : List[str] = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , _A )
                total_reduced_size += mem_size

                a : Optional[Any] = inits[i].name
                a : str = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(_A )
                else:
                    a : int = [name_j]
                ind_to_replace.append((j, i) )

    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )

    a : int = sorted(_A )
    _remove_dup_initializers_from_model(_A , _A , _A )

    a : int = 'optimized_' + model_file_name
    a : Optional[Any] = os.path.join(_A , _A )
    onnx.save(_A , _A )

    return new_model
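# A self-contained sketch of the name-independent comparison the dedup pass above is built
# on: two initializers count as duplicates when their protos are equal after blanking the
# names. The helper name is illustrative; it mirrors the first function in the sample.
import numpy as np
import onnx
from onnx import numpy_helper


def tensors_equal_ignoring_name(a: onnx.TensorProto, b: onnx.TensorProto) -> bool:
    # temporarily blank the names so equality only reflects dtype, dims and raw data
    name_a, name_b = a.name, b.name
    a.name = b.name = ""
    res = a == b
    a.name, b.name = name_a, name_b
    return res


if __name__ == "__main__":
    t1 = numpy_helper.from_array(np.ones(3, dtype=np.float32), name="w1")
    t2 = numpy_helper.from_array(np.ones(3, dtype=np.float32), name="w2")
    assert tensors_equal_ignoring_name(t1, t2)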
297
'''simple docstring'''


def add(first , second ):
    while second != 0:
        # carry bits are where both numbers have a 1; shift them into place
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(F"{add(first, second) = }")
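# A worked trace of the carry-propagation loop above for add(5, 3):
#   c = 5 & 3 = 1 ; first = 5 ^ 3 = 6 ; second = 1 << 1 = 2
#   c = 6 & 2 = 2 ; first = 6 ^ 2 = 4 ; second = 2 << 1 = 4
#   c = 4 & 4 = 4 ; first = 4 ^ 4 = 0 ; second = 4 << 1 = 8
#   c = 0 & 8 = 0 ; first = 0 ^ 8 = 8 ; second = 0  ->  returns 8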
297
1
'''simple docstring'''

from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar


lowerCAmelCase: Dict = TypeVar('KEY')
lowerCAmelCase: int = TypeVar('VAL')


@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class a__( Generic[KEY, VAL] ):
    lowercase__ = 42
    lowercase__ = 42


class a__( _Item ):
    def __init__( self : Any ):
        super().__init__(__snake_case , __snake_case )

    def __bool__( self : Any ):
        return False


lowerCAmelCase: int = _DeletedItem()


class a__( MutableMapping[KEY, VAL] ):
    def __init__( self : Any , __snake_case : int = 8 , __snake_case : float = 0.75 ):
        a : Optional[Any] = initial_block_size
        a : list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        a : int = capacity_factor
        a : Optional[int] = 0

    def lowercase_ ( self : Any , __snake_case : KEY ):
        return hash(__snake_case ) % len(self._buckets )

    def lowercase_ ( self : List[Any] , __snake_case : int ):
        return (ind + 1) % len(self._buckets )

    def lowercase_ ( self : Dict , __snake_case : int , __snake_case : KEY , __snake_case : VAL ):
        a : Optional[int] = self._buckets[ind]
        if not stored:
            a : int = _Item(__snake_case , __snake_case )
            self._len += 1
            return True
        elif stored.key == key:
            a : Optional[Any] = _Item(__snake_case , __snake_case )
            return True
        else:
            return False

    def lowercase_ ( self : Optional[Any] ):
        a : List[str] = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(__snake_case )

    def lowercase_ ( self : List[str] ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        a : Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def lowercase_ ( self : Tuple , __snake_case : int ):
        a : int = self._buckets
        a : Tuple = [None] * new_size
        a : Tuple = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def lowercase_ ( self : List[Any] ):
        self._resize(len(self._buckets ) * 2 )

    def lowercase_ ( self : int ):
        self._resize(len(self._buckets ) // 2 )

    def lowercase_ ( self : int , __snake_case : KEY ):
        a : List[Any] = self._get_bucket_index(__snake_case )
        for _ in range(len(self._buckets ) ):
            yield ind
            a : Dict = self._get_next_ind(__snake_case )

    def lowercase_ ( self : Optional[Any] , __snake_case : KEY , __snake_case : VAL ):
        for ind in self._iterate_buckets(__snake_case ):
            if self._try_set(__snake_case , __snake_case , __snake_case ):
                break

    def __setitem__( self : List[Any] , __snake_case : KEY , __snake_case : VAL ):
        if self._is_full():
            self._size_up()

        self._add_item(__snake_case , __snake_case )

    def __delitem__( self : List[Any] , __snake_case : KEY ):
        for ind in self._iterate_buckets(__snake_case ):
            a : Any = self._buckets[ind]
            if item is None:
                raise KeyError(__snake_case )
            if item is _deleted:
                continue
            if item.key == key:
                a : Union[str, Any] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self : int , __snake_case : KEY ):
        for ind in self._iterate_buckets(__snake_case ):
            a : Dict = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(__snake_case )

    def __len__( self : Tuple ):
        return self._len

    def __iter__( self : int ):
        yield from (item.key for item in self._buckets if item)

    def __repr__( self : Optional[Any] ):
        a : Dict = ' ,'.join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item )
        return F"""HashMap({val_string})"""
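# A tiny, self-contained sketch of the linear-probing scan the HashMap above performs on
# collisions (independent names; in the class this is the pattern behind _get_bucket_index,
# _get_next_ind and _iterate_buckets, which the call sites above reference).
def probe_sequence(key, n_buckets):
    ind = hash(key) % n_buckets        # home bucket
    for _ in range(n_buckets):
        yield ind
        ind = (ind + 1) % n_buckets    # step to the next bucket, wrapping around


if __name__ == "__main__":
    # with 8 buckets the scan visits every index exactly once
    assert sorted(probe_sequence('key', 8)) == list(range(8))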
297
'''simple docstring'''

import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def lowerCamelCase__ ( _A , _A ):
    assert isinstance(_A , _A )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
    a : str = tmp_path / 'cache'
    a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
    _check_json_dataset(_A , _A )


@pytest.mark.parametrize(
    'features' ,
    [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ] ,
)
def lowerCamelCase__ ( _A , _A , _A ):
    a : str = tmp_path / 'cache'
    a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    a : Dict = features.copy() if features else default_expected_features
    a : Union[str, Any] = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
    _check_json_dataset(_A , _A )


@pytest.mark.parametrize(
    'features' ,
    [
        None,
        {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
    ] ,
)
def lowerCamelCase__ ( _A , _A , _A ):
    a : Tuple = tmp_path / 'cache'
    a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
    a : Optional[int] = features.copy() if features else default_expected_features
    a : Dict = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
    assert isinstance(_A , _A )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def lowerCamelCase__ ( _A , _A ):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
    a : int = features.copy()
    a : List[Any] = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    a : Dict = tmp_path / 'cache'
    a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
    assert isinstance(_A , _A )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
    a : Dict = tmp_path / 'cache'
    a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
    _check_json_dataset(_A , _A )
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize('path_type' , [str, list] )
def lowerCamelCase__ ( _A , _A , _A ):
    if issubclass(_A , _A ):
        a : Optional[int] = jsonl_path
    elif issubclass(_A , _A ):
        a : Optional[int] = [jsonl_path]
    a : List[str] = tmp_path / 'cache'
    a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
    _check_json_dataset(_A , _A )


def lowerCamelCase__ ( _A , _A , _A=("train",) ):
    assert isinstance(_A , _A )
    for split in splits:
        a : str = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
    a : Dict = tmp_path / 'cache'
    a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
    _check_json_datasetdict(_A , _A )


@pytest.mark.parametrize(
    'features' ,
    [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ] ,
)
def lowerCamelCase__ ( _A , _A , _A ):
    a : Dict = tmp_path / 'cache'
    a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    a : List[Any] = features.copy() if features else default_expected_features
    a : Any = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read()
    _check_json_datasetdict(_A , _A )


@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
    if split:
        a : Any = {split: jsonl_path}
    else:
        a : List[Any] = 'train'
        a : List[str] = {'train': jsonl_path, 'test': jsonl_path}
    a : List[Any] = tmp_path / 'cache'
    a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
    _check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )


def lowerCamelCase__ ( _A ):
    return json.load(_A )


def lowerCamelCase__ ( _A ):
    return [json.loads(_A ) for line in buffer]


class a__:
    @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
            buffer.seek(0 )
            a : List[str] = load_json_function(__snake_case )
        assert isinstance(__snake_case , __snake_case )
        assert isinstance(exported_content[0] , __snake_case )
        assert len(__snake_case ) == 10

    @pytest.mark.parametrize(
        'orient, container, keys, len_at' ,
        [
            ('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
            ('split', dict, {'columns', 'data'}, 'data'),
            ('index', dict, set('0123456789' ), None),
            ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
            ('values', list, None, None),
            ('table', dict, {'schema', 'data'}, 'data'),
        ] ,
    )
    def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
            buffer.seek(0 )
            a : int = load_json(__snake_case )
        assert isinstance(__snake_case , __snake_case )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(__snake_case ) == 10

    @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
            buffer.seek(0 )
            a : List[Any] = load_json_function(__snake_case )
        assert isinstance(__snake_case , __snake_case )
        assert isinstance(exported_content[0] , __snake_case )
        assert len(__snake_case ) == 10

    @pytest.mark.parametrize(
        'orient, container, keys, len_at' ,
        [
            ('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
            ('split', dict, {'columns', 'data'}, 'data'),
            ('index', dict, set('0123456789' ), None),
            ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
            ('values', list, None, None),
            ('table', dict, {'schema', 'data'}, 'data'),
        ] ,
    )
    def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
            buffer.seek(0 )
            a : int = load_json(__snake_case )
        assert isinstance(__snake_case , __snake_case )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(__snake_case ) == 10

    def lowercase_ ( self : List[str] , __snake_case : str ):
        with pytest.raises(__snake_case ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )

    @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
    def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ):
        a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
        a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
        JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()

        with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
            a : Union[str, Any] = f.read()
        with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
            a : Union[str, Any] = f.read()
        assert exported_content == original_content
297
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = StableDiffusionXLImgaImgPipeline lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} lowercase__ = PipelineTesterMixin.required_optional_params - {"""latents"""} lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowercase_ ( self : str ): torch.manual_seed(0 ) a : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__snake_case , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) a : Optional[Any] = EulerDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , ) torch.manual_seed(0 ) a : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) a : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=32 , ) a : Union[str, Any] = CLIPTextModel(__snake_case ) a : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__snake_case ) a : str = CLIPTextModelWithProjection(__snake_case ) a : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__snake_case ) a : Tuple = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def lowercase_ ( self : int , __snake_case : Tuple , __snake_case : Tuple=0 ): a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case ) a : List[Any] = image / 2 + 0.5 if str(__snake_case ).startswith('mps' ): a : int = torch.manual_seed(__snake_case ) else: a : List[str] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 
0.75, } return inputs def lowercase_ ( self : str ): a : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator a : str = self.get_dummy_components() a : Optional[Any] = StableDiffusionXLImgaImgPipeline(**__snake_case ) a : List[Any] = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) a : Optional[int] = self.get_dummy_inputs(__snake_case ) a : Union[str, Any] = sd_pipe(**__snake_case ).images a : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) a : Dict = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase_ ( self : Any ): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def lowercase_ ( self : int ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowercase_ ( self : int ): pass def lowercase_ ( self : Union[str, Any] ): a : Optional[int] = self.get_dummy_components() a : int = StableDiffusionXLImgaImgPipeline(**__snake_case ) a : Optional[Any] = sd_pipe.to(__snake_case ) a : List[Any] = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) # forward without prompt embeds a : Optional[int] = self.get_dummy_inputs(__snake_case ) a : Union[str, Any] = 3 * ['this is a negative prompt'] a : Optional[Any] = negative_prompt a : int = 3 * [inputs['prompt']] a : Dict = sd_pipe(**__snake_case ) a : Any = output.images[0, -3:, -3:, -1] # forward with prompt embeds a : Dict = self.get_dummy_inputs(__snake_case ) a : int = 3 * ['this is a negative prompt'] a : List[str] = 3 * [inputs.pop('prompt' )] ( ( a ) , ( a ) , ( a ) , ( a ) , ) : Union[str, Any] = sd_pipe.encode_prompt(__snake_case , negative_prompt=__snake_case ) a : Dict = sd_pipe( **__snake_case , prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , pooled_prompt_embeds=__snake_case , negative_pooled_prompt_embeds=__snake_case , ) a : int = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Union[str, Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : Union[str, Any]="cpu" , __snake_case : str=torch.floataa , __snake_case : Optional[Any]=0 ): a : Any = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Any = np.random.RandomState(__snake_case ).standard_normal((1, 4, 64, 64) ) a : Tuple = torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case ) a : Any = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def lowercase_ ( self : Union[str, Any] ): a : int = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) a : str = self.get_inputs(__snake_case ) a : int = pipe(**__snake_case ).images a : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) a : str = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
297
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowerCamelCase__ ( _A = "laptop" ): a : Any = f"""https://www.amazon.in/laptop/s?k={product}""" a : Tuple = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5', } a : Any = BeautifulSoup(requests.get(_A , headers=_A ).text ) # Initialize a Pandas dataframe with the column titles a : Any = DataFrame( columns=[ 'Product Title', 'Product Link', 'Current Price of the product', 'Product Rating', 'MRP of the product', 'Discount', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( 'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ): try: a : Optional[int] = item.ha.text a : str = 'https://www.amazon.in/' + item.ha.a['href'] a : List[str] = item.find('span' , attrs={'class': 'a-offscreen'} ).text try: a : Optional[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text except AttributeError: a : Union[str, Any] = 'Not available' try: a : str = ( '₹' + item.find( 'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1] ) except AttributeError: a : int = '' try: a : Union[str, Any] = float( ( ( float(product_mrp.strip('₹' ).replace(',' , '' ) ) - float(product_price.strip('₹' ).replace(',' , '' ) ) ) / float(product_mrp.strip('₹' ).replace(',' , '' ) ) ) * 100 ) except ValueError: a : Any = float('nan' ) except AttributeError: pass a : Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] a : Any = ' ' a : List[str] = ' ' data_frame.index += 1 return data_frame if __name__ == "__main__": lowerCAmelCase: str = 'headphones' get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
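# The discount computation in the scraper above, in isolation (a minimal sketch
# with made-up prices; 'mrp' and 'price' here are illustrative values, not data
# returned by Amazon):
mrp, price = '₹1,000', '₹750'
mrp_value = float(mrp.strip('₹').replace(',', ''))
price_value = float(price.strip('₹').replace(',', ''))
discount = (mrp_value - price_value) / mrp_value * 100
print(discount)  # -> 25.0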
297
1
'''simple docstring'''
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(contents):
        return tok(contents, return_tensors='pt').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / f"""{split}.source""").open('w').write('\n'.join(packed_src))
        Path(save_path / f"""{split}.target""").open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path, save_path / f"""{split}.source""")
        shutil.copyfile(tgt_path, save_path / f"""{split}.target""")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
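# Minimal usage sketch for pack_examples above, with a stand-in "tokenizer":
# a tiny callable that mimics only the .input_ids.shape interface the function
# expects (whitespace tokens, not a real HF tokenizer).
from types import SimpleNamespace


def toy_tok(text, return_tensors='pt'):
    ids = text.split()
    return SimpleNamespace(input_ids=SimpleNamespace(shape=(1, len(ids))))


src = ['a b', 'c d', 'e f g h i']
tgt = ['1 2', '3 4', '5 6 7 8 9']
packed_src, packed_tgt = pack_examples(toy_tok, src, tgt, max_tokens=5)
print(packed_src)  # -> ['a b c d', 'e f g h i']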
297
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = StableUnCLIPImgaImgPipeline lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowercase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase__ = frozenset([] ) def lowercase_ ( self : int ): a : Dict = 32 a : str = embedder_hidden_size # image encoding components a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) a : Dict = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case ) a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' ) torch.manual_seed(0 ) a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) torch.manual_seed(0 ) a : Tuple = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) a : Union[str, Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , ) torch.manual_seed(0 ) a : List[Any] = DDIMScheduler( beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , ) torch.manual_seed(0 ) a : List[str] = AutoencoderKL() a : str = { # image encoding components 'feature_extractor': feature_extractor, 'image_encoder': image_encoder.eval(), # image noising components 'image_normalizer': image_normalizer.eval(), 'image_noising_scheduler': image_noising_scheduler, # regular denoising components 'tokenizer': tokenizer, 'text_encoder': text_encoder.eval(), 'unet': unet.eval(), 'scheduler': 
scheduler, 'vae': vae.eval(), } return components def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ): if str(__snake_case ).startswith('mps' ): a : Tuple = torch.manual_seed(__snake_case ) else: a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case ) if pil_image: a : Optional[Any] = input_image * 0.5 + 0.5 a : Optional[Any] = input_image.clamp(0 , 1 ) a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def lowercase_ ( self : Optional[Any] ): a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Union[str, Any] = self.get_dummy_components() a : Any = StableUnCLIPImgaImgPipeline(**__snake_case ) a : Tuple = sd_pipe.to(__snake_case ) sd_pipe.set_progress_bar_config(disable=__snake_case ) a : Union[str, Any] = self.get_dummy_inputs(__snake_case ) inputs.update({'image_embeds': None} ) a : str = sd_pipe(**__snake_case ).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase_ ( self : List[str] ): a : int = torch_device in ['cpu', 'mps'] self._test_attention_slicing_forward_pass(test_max_difference=__snake_case ) def lowercase_ ( self : int ): a : Optional[int] = torch_device in ['cpu', 'mps'] self._test_inference_batch_single_identical(test_max_difference=__snake_case ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowercase_ ( self : Dict ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case ) @slow @require_torch_gpu class a__( unittest.TestCase ): def lowercase_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Optional[Any] ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' ) a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 ) a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Optional[int] ): a : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) a : Any = 
load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' ) a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' ) a : List[str] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__snake_case , __snake_case ) def lowercase_ ( self : Any ): a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) a : Optional[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() a : Optional[int] = pipe( __snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
297
1
'''simple docstring'''
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    # Repeatedly square the starting point until it overshoots a
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    # Newton-Raphson iteration on f(x) = x^2 - a
    if a < 0:
        raise ValueError('math domain error')
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
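# Minimal usage sketch for the Newton-Raphson square root above (assumes the
# definitions from the preceding snippet are in scope):
from math import isclose, sqrt

for a in (2, 10, 625):
    approx = square_root_iterative(a)
    assert isclose(approx, sqrt(a), rel_tol=1e-9)
    print(f"sqrt({a}) ~ {approx}")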
297
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__) lowerCAmelCase: List[str] = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class a__( lowerCamelCase__ ): lowercase__ = """t5""" lowercase__ = ["""past_key_values"""] lowercase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Union[str, Any] , __snake_case : int=3_21_28 , __snake_case : str=5_12 , __snake_case : Dict=64 , __snake_case : Optional[int]=20_48 , __snake_case : Tuple=6 , __snake_case : Any=None , __snake_case : Optional[int]=8 , __snake_case : str=32 , __snake_case : Union[str, Any]=1_28 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=1e-6 , __snake_case : int=1.0 , __snake_case : Optional[int]="relu" , __snake_case : Any=True , __snake_case : List[str]=True , __snake_case : Union[str, Any]=0 , __snake_case : Dict=1 , **__snake_case : Optional[int] , ): a : Optional[int] = vocab_size a : Dict = d_model a : Union[str, Any] = d_kv a : Dict = d_ff a : Tuple = num_layers a : Dict = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a : int = num_heads a : str = relative_attention_num_buckets a : List[Any] = relative_attention_max_distance a : int = dropout_rate a : Tuple = layer_norm_epsilon a : str = initializer_factor a : List[Any] = feed_forward_proj a : Union[str, Any] = use_cache a : List[str] = self.feed_forward_proj.split('-' ) a : int = act_info[-1] a : Union[str, Any] = act_info[0] == 'gated' if len(__snake_case ) > 1 and act_info[0] != "gated" or len(__snake_case ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a : Optional[Any] = 'gelu_new' super().__init__( pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , **__snake_case , ) class a__( lowerCamelCase__ ): @property def lowercase_ ( self : Optional[int] ): a : Dict = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: a : Dict = 'past_encoder_sequence + sequence' a : Dict = {0: 'batch'} a : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: a : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'} a : List[str] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction='inputs' ) return common_inputs @property def lowercase_ ( self : List[Any] ): return 13
297
1
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase__ ( _A , _A , _A , _A , _A ): # load base model a : Optional[Any] = StableDiffusionPipeline.from_pretrained(_A , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors a : Dict = load_file(_A ) a : List[Any] = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: a : str = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) a : List[str] = pipeline.text_encoder else: a : Dict = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) a : Optional[int] = pipeline.unet # find the target layer a : Dict = layer_infos.pop(0 ) while len(_A ) > -1: try: a : Union[str, Any] = curr_layer.__getattr__(_A ) if len(_A ) > 0: a : Tuple = layer_infos.pop(0 ) elif len(_A ) == 0: break except Exception: if len(_A ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: a : List[Any] = layer_infos.pop(0 ) a : Union[str, Any] = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(_A ) else: pair_keys.append(_A ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: a : str = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) a : Tuple = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_A , _A ).unsqueeze(2 ).unsqueeze(3 ) else: a : Optional[Any] = state_dict[pair_keys[0]].to(torch.floataa ) a : int = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_A , _A ) # update visited list for item in pair_keys: visited.append(_A ) return pipeline if __name__ == "__main__": lowerCAmelCase: Tuple = argparse.ArgumentParser() parser.add_argument( '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.' ) parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors' ) parser.add_argument( '--lora_prefix_text_encoder', default='lora_te', type=str, help='The prefix of text encoder weight in safetensors', ) parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW') parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.' ) parser.add_argument('--device', type=str, help='Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)') lowerCAmelCase: List[Any] = parser.parse_args() lowerCAmelCase: Tuple = args.base_model_path lowerCAmelCase: Union[str, Any] = args.checkpoint_path lowerCAmelCase: Union[str, Any] = args.dump_path lowerCAmelCase: List[str] = args.lora_prefix_unet lowerCAmelCase: int = args.lora_prefix_text_encoder lowerCAmelCase: List[Any] = args.alpha lowerCAmelCase: List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowerCAmelCase: Optional[Any] = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
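# Minimal sketch of the per-layer update the script above applies, on toy
# tensors (the shapes and names here are illustrative, not part of the script):
import torch

out_dim, in_dim, rank, alpha = 8, 8, 4, 0.75
weight = torch.zeros(out_dim, in_dim)
lora_up = torch.randn(out_dim, rank)
lora_down = torch.randn(rank, in_dim)

# W = W0 + alpha * (up @ down), matching the torch.mm call in the linear branch
weight += alpha * torch.mm(lora_up, lora_down)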
297
'''simple docstring'''
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    # f = 1 / (2 * pi * sqrt(L * C)) for an ideal LC circuit
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative')
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
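# Minimal usage sketch, assuming resonant_frequency from the snippet above is
# in scope. For L = 10 mH and C = 25 uF:
# f = 1 / (2*pi*sqrt(L*C)) = 1 / (2*pi*5e-4) ~ 318.3 Hz
label, freq = resonant_frequency(inductance=0.01, capacitance=25e-6)
print(label, round(freq, 1), 'Hz')  # -> Resonant frequency 318.3 Hz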
297
1
'''simple docstring''' import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase: List[Any] = '▁' lowerCAmelCase: str = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class a__( lowerCamelCase__ , unittest.TestCase ): lowercase__ = BertGenerationTokenizer lowercase__ = False lowercase__ = True def lowercase_ ( self : Any ): super().setUp() a : List[Any] = BertGenerationTokenizer(__snake_case , keep_accents=__snake_case ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Optional[Any] ): a : int = '<s>' a : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case ) def lowercase_ ( self : Optional[int] ): a : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(__snake_case ) , 10_02 ) def lowercase_ ( self : List[str] ): self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def lowercase_ ( self : int ): a : List[str] = BertGenerationTokenizer(__snake_case , keep_accents=__snake_case ) a : List[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__snake_case ) , [2_85, 46, 10, 1_70, 3_82] , ) a : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __snake_case , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) a : Any = tokenizer.convert_tokens_to_ids(__snake_case ) self.assertListEqual( __snake_case , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) a : Tuple = tokenizer.convert_ids_to_tokens(__snake_case ) self.assertListEqual( __snake_case , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def lowercase_ ( self : int ): return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) @slow def lowercase_ ( self : Optional[int] ): a : Optional[int] = 'Hello World!' a : List[Any] = [1_85_36, 22_60, 1_01] self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) ) @slow def lowercase_ ( self : Union[str, Any] ): a : Optional[Any] = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) a : Any = [ 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, ] self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) ) @require_torch @slow def lowercase_ ( self : Union[str, Any] ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence a : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] a : Optional[int] = ' '.join(__snake_case ) a : Optional[Any] = self.big_tokenizer.encode_plus(__snake_case , return_tensors='pt' , return_token_type_ids=__snake_case ) a : Union[str, Any] = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__snake_case ) a : List[str] = BertGenerationConfig() a : Union[str, Any] = BertGenerationEncoder(__snake_case ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__snake_case ) model(**__snake_case ) @slow def lowercase_ ( self : Union[str, Any] ): # fmt: off a : List[str] = {'input_ids': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
297
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase: Any = logging.get_logger(__name__) lowerCAmelCase: Any = {'vocab_file': 'vocab.txt'} lowerCAmelCase: List[Any] = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowerCAmelCase: str = { 'openbmb/cpm-ant-10b': 1_0_2_4, } def lowerCamelCase__ ( _A ): a : Union[str, Any] = collections.OrderedDict() with open(_A , 'r' , encoding='utf-8' ) as reader: a : int = reader.readlines() for index, token in enumerate(_A ): a : int = token.rstrip('\n' ) a : List[Any] = index return vocab class a__( lowerCamelCase__ ): def __init__( self : Dict , __snake_case : Any , __snake_case : Dict="<unk>" , __snake_case : str=2_00 ): a : List[Any] = vocab a : Any = unk_token a : List[str] = max_input_chars_per_word def lowercase_ ( self : Optional[int] , __snake_case : Union[str, Any] ): a : Optional[Any] = list(__snake_case ) if len(__snake_case ) > self.max_input_chars_per_word: return [self.unk_token] a : Any = 0 a : Optional[Any] = [] while start < len(__snake_case ): a : Optional[int] = len(__snake_case ) a : str = None while start < end: a : Optional[Any] = ''.join(chars[start:end] ) if substr in self.vocab: a : List[str] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__snake_case ) a : List[str] = end return sub_tokens class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["""input_ids""", """attention_mask"""] lowercase__ = False def __init__( self : Any , __snake_case : str , __snake_case : Tuple="<d>" , __snake_case : List[str]="</d>" , __snake_case : Dict="<s>" , __snake_case : List[Any]="</s>" , __snake_case : int="<pad>" , __snake_case : Any="<unk>" , __snake_case : List[str]="</n>" , __snake_case : int="</_>" , __snake_case : Optional[Any]="left" , **__snake_case : Dict , ): requires_backends(self , ['jieba'] ) super().__init__( bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , ) a : Union[str, Any] = bod_token a : Any = eod_token a : List[str] = load_vocab(__snake_case ) a : Optional[int] = self.encoder[space_token] a : str = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] a : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) a : Tuple = {v: k for k, v in self.encoder.items()} a : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowercase_ ( self : Optional[int] ): return self.encoder[self.bod_token] @property def lowercase_ ( self : Dict ): return self.encoder[self.eod_token] @property def lowercase_ ( self : Any ): return self.encoder["\n"] @property def lowercase_ ( self : Tuple ): return len(self.encoder ) def lowercase_ ( self : str ): return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ): a : List[str] = [] for x in jieba.cut(__snake_case , cut_all=__snake_case ): 
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) ) return output_tokens def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , **__snake_case : Optional[Any] ): a : Optional[int] = [i for i in token_ids if i >= 0] a : Any = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__snake_case , **__snake_case ) def lowercase_ ( self : Optional[int] , __snake_case : int ): return token in self.encoder def lowercase_ ( self : int , __snake_case : List[str] ): return "".join(__snake_case ) def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] ): return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) ) def lowercase_ ( self : Tuple , __snake_case : List[str] ): return self.decoder.get(__snake_case , self.unk_token ) def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ): if os.path.isdir(__snake_case ): a : Optional[int] = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: a : int = (filename_prefix + '-' if filename_prefix else '') + save_directory a : Any = 0 if " " in self.encoder: a : Union[str, Any] = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: a : Tuple = self.encoder['\n'] del self.encoder["\n"] a : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) ) with open(__snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' ) a : List[Any] = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def lowercase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : List[int] = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowercase_ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) if token_ids_a is not None: return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) return [1] + ([0] * len(__snake_case ))
297
1
'''simple docstring'''
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols('ct x y z')


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('Speed must be greater than or equal to 1!')
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print('Example of four vector: ')
    print(F"ct' = {four_vector[0]}")
    print(F"x' = {four_vector[1]}")
    print(F"y' = {four_vector[2]}")
    print(F"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(F"\n{numerical_vector}")
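# Minimal usage sketch for the Lorentz boost above (assumes transform and c
# from the preceding snippet are in scope; the event values are made up):
import numpy as np

# An event 1 second and 1 metre from the origin, boosted at half light speed.
event = np.array([1.0, 1.0, 0.0, 0.0])  # [t, x, y, z]; transform() rescales t -> ct
boosted = transform(c / 2, event)
# gamma ~ 1.1547 here; ct' = gamma*(ct - beta*x), x' = gamma*(x - beta*ct)
print(boosted)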
297
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class a__( unittest.TestCase ): @slow def lowercase_ ( self : List[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[int] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Any = TFAutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) 
self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Any ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[int] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : str = AutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , 
__snake_case ) a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def lowercase_ ( self : Tuple ): a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) def lowercase_ ( self : Any ): a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
297
1
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    # Sum the even-valued Fibonacci terms that do not exceed n (Project Euler 2)
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(F"{solution() = }")
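# Quick sanity check for solution() above: the Fibonacci numbers up to 10 are
# 0, 1, 1, 2, 3, 5, 8, and the even ones (0 + 2 + 8) sum to 10.
assert solution(10) == 10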
297
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase: List[Any] = logging.get_logger(__name__) lowerCAmelCase: List[Any] = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class a__( lowerCamelCase__ ): lowercase__ = """roberta""" def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : int=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Dict=12 , __snake_case : Tuple=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : str=5_12 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : int=1e-1_2 , __snake_case : str=1 , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=None , **__snake_case : str , ): super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case ) a : List[str] = vocab_size a : str = hidden_size a : Tuple = num_hidden_layers a : Dict = num_attention_heads a : List[Any] = hidden_act a : str = intermediate_size a : Union[str, Any] = hidden_dropout_prob a : Optional[Any] = attention_probs_dropout_prob a : Any = max_position_embeddings a : Optional[int] = type_vocab_size a : str = initializer_range a : List[Any] = layer_norm_eps a : Optional[int] = position_embedding_type a : Dict = use_cache a : Any = classifier_dropout class a__( lowerCamelCase__ ): @property def lowercase_ ( self : int ): if self.task == "multiple-choice": a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a : str = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
297
1
'''simple docstring''' lowerCAmelCase: Optional[Any] = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
297
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
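# Minimal usage sketch, assuming equation/bisection from the snippet above are
# in scope. equation(x) = 10 - x**2 changes sign on [-2, 5] and on [0, 6], so
# both calls converge to sqrt(10) ~ 3.162 within the 0.01 interval-width
# stopping rule; an interval with no sign change, e.g. [0, 1], raises ValueError.
root = bisection(0, 6)
assert abs(equation(root)) < 0.1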
297
1
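The first blob in the record above is a pin table mapping package names to pip-style version specifiers. Assuming the real `packaging` library (the same parser pip uses), a specifier string such as `'tokenizers>=0.11.1,!=0.11.3,<0.14'` can be checked against candidate versions as follows; the candidate version numbers are illustrative only:

```python
from packaging.requirements import Requirement

# One entry from the pin table above, checked against candidate versions.
req = Requirement("tokenizers>=0.11.1,!=0.11.3,<0.14")
for candidate in ("0.11.3", "0.13.3", "0.14.0"):
    verdict = "ok" if req.specifier.contains(candidate) else "rejected"
    print(req.name, candidate, verdict)
```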
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase__ ( _A , _A , _A ): return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :] def lowerCamelCase__ ( _A , _A , _A , _A="attention" ): a : Union[str, Any] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] ) a : Any = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) a : Tuple = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] ) a : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) a : str = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] ) a : Tuple = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) a : List[Any] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] ) a : List[str] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowerCamelCase__ ( _A , _A , _A , _A=False ): if split_mlp_wi: a : str = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :] a : Optional[Any] = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :] a : int = (wi_a, wi_a) else: a : List[Any] = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :] a : List[Any] = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :] return wi, wo def lowerCamelCase__ ( _A , _A , _A , _A ): return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i] def lowerCamelCase__ ( _A , *, _A , _A , _A = False ): a : Optional[int] = traverse_util.flatten_dict(variables['target'] ) a : Union[str, Any] = {'/'.join(_A ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi a : Optional[Any] = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , _A ) a : Union[str, Any] = collections.OrderedDict() # Shared embeddings. a : int = old['token_embedder/embedding'] # Encoder. for i in range(_A ): # Block i, layer 0 (Self Attention). a : List[Any] = tax_layer_norm_lookup(_A , _A , 'encoder' , 'pre_attention_layer_norm' ) a , a , a , a : Dict = tax_attention_lookup(_A , _A , 'encoder' , 'attention' ) a : str = layer_norm a : List[str] = k.T a : Union[str, Any] = o.T a : Optional[int] = q.T a : Tuple = v.T # Block i, layer 1 (MLP). a : Optional[Any] = tax_layer_norm_lookup(_A , _A , 'encoder' , 'pre_mlp_layer_norm' ) a , a : Optional[int] = tax_mlp_lookup(_A , _A , 'encoder' , _A ) a : Tuple = layer_norm if split_mlp_wi: a : str = wi[0].T a : str = wi[1].T else: a : Union[str, Any] = wi.T a : Union[str, Any] = wo.T if scalable_attention: # convert the rel_embedding of each layer a : Optional[int] = tax_relpos_bias_lookup( _A , _A , 'encoder' ).T a : Dict = old['encoder/encoder_norm/scale'] if not scalable_attention: a : List[Any] = tax_relpos_bias_lookup( _A , 0 , 'encoder' ).T a : List[Any] = tax_relpos_bias_lookup( _A , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(_A ): # Block i, layer 0 (Self Attention). 
a : str = tax_layer_norm_lookup(_A , _A , 'decoder' , 'pre_self_attention_layer_norm' ) a , a , a , a : List[str] = tax_attention_lookup(_A , _A , 'decoder' , 'self_attention' ) a : Dict = layer_norm a : Union[str, Any] = k.T a : List[Any] = o.T a : Optional[int] = q.T a : List[str] = v.T # Block i, layer 1 (Cross Attention). a : List[Any] = tax_layer_norm_lookup(_A , _A , 'decoder' , 'pre_cross_attention_layer_norm' ) a , a , a , a : Tuple = tax_attention_lookup(_A , _A , 'decoder' , 'encoder_decoder_attention' ) a : List[str] = layer_norm a : Dict = k.T a : int = o.T a : str = q.T a : Tuple = v.T # Block i, layer 2 (MLP). a : Tuple = tax_layer_norm_lookup(_A , _A , 'decoder' , 'pre_mlp_layer_norm' ) a , a : List[Any] = tax_mlp_lookup(_A , _A , 'decoder' , _A ) a : Dict = layer_norm if split_mlp_wi: a : int = wi[0].T a : int = wi[1].T else: a : Optional[int] = wi.T a : Optional[Any] = wo.T if scalable_attention: # convert the rel_embedding of each layer a : List[Any] = tax_relpos_bias_lookup(_A , _A , 'decoder' ).T a : Tuple = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: a : Tuple = old['decoder/logits_dense/kernel'].T return new def lowerCamelCase__ ( _A , _A ): a : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: a : Union[str, Any] = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: a : List[str] = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' ) a : str = state_dict['shared.weight'] return state_dict def lowerCamelCase__ ( _A , _A , _A , _A , _A ): a : Tuple = checkpoints.load_tax_checkpoint(_A ) a : Dict = convert_tax_to_pytorch( _A , num_layers=config.num_layers , is_encoder_only=_A , scalable_attention=_A ) a : str = make_state_dict(_A , _A ) model.load_state_dict(_A , strict=_A ) def lowerCamelCase__ ( _A , _A , _A , _A = False , _A = False , ): a : Dict = MTaConfig.from_json_file(_A ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: a : Dict = UMTaEncoderModel(_A ) else: a : Optional[int] = UMTaForConditionalGeneration(_A ) # Load weights from tf checkpoint load_tax_weights_in_ta(_A , _A , _A , _A , _A ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_A ) # Verify that we can load the checkpoint. model.from_pretrained(_A ) print('Done' ) if __name__ == "__main__": lowerCAmelCase: Optional[Any] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action='store_true', help='Whether the model uses scaled attention (umt5 model)', default=False, ) lowerCAmelCase: Dict = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
297
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class a__: def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ): a : Tuple = parent a : List[str] = batch_size a : Optional[Any] = seq_length a : Tuple = is_training a : Optional[Any] = use_input_mask a : List[Any] = use_token_type_ids a : List[Any] = use_labels a : int = vocab_size a : Union[str, Any] = hidden_size a : Any = num_hidden_layers a : List[str] = num_attention_heads a : int = intermediate_size a : str = hidden_act a : Tuple = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : List[str] = max_position_embeddings a : Any = type_vocab_size a : List[str] = type_sequence_label_size a : Union[str, Any] = initializer_range a : Optional[int] = num_labels a : Optional[Any] = num_choices a : Optional[int] = scope def lowercase_ ( self : List[Any] ): a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a : Dict = None if self.use_input_mask: a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a : Optional[Any] = None a : Optional[int] = None a : Dict = None if self.use_labels: a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) a : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : List[Any] ): a : Any = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ): a : Tuple = EsmForProteinFolding(config=__snake_case ).float() model.to(__snake_case ) model.eval() a : Dict = model(__snake_case , attention_mask=__snake_case ) a : Union[str, Any] = 
model(__snake_case ) a : List[Any] = model(__snake_case ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def lowercase_ ( self : Optional[Any] ): a : Tuple = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) : Optional[Any] = config_and_inputs a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = False lowercase__ = (EsmForProteinFolding,) if is_torch_available() else () lowercase__ = () lowercase__ = {} if is_torch_available() else {} lowercase__ = False def lowercase_ ( self : int ): a : Tuple = EsmFoldModelTester(self ) a : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def lowercase_ ( self : List[str] ): self.config_tester.run_common_tests() def lowercase_ ( self : Union[str, Any] ): a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) @unittest.skip('Does not support attention outputs' ) def lowercase_ ( self : str ): pass @unittest.skip def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support passing input embeds!' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold only has one output format.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def lowercase_ ( self : Tuple ): pass @unittest.skip('ESMFold does not support input chunking.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase_ ( self : Union[str, Any] ): pass @require_torch class a__( lowerCamelCase__ ): @slow def lowercase_ ( self : Optional[int] ): a : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() a : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) a : Any = model(__snake_case )['positions'] a : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) )
297
1
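The conversion script above revolves around two moves: flattening a nested Flax parameter tree into `'/'`-joined keys (what its `traverse_util.flatten_dict` call produces) and transposing dense kernels, since Flax stores them as `(in, out)` while `torch.nn.Linear` expects `(out, in)`. A minimal numpy-only sketch of both steps, using a hypothetical one-layer parameter tree (real T5X checkpoints nest much deeper):

```python
import numpy as np


def flatten(tree, prefix=()):
    """Flatten a nested dict into {'a/b/c': leaf}, as traverse_util.flatten_dict does."""
    flat = {}
    for key, value in tree.items():
        path = prefix + (key,)
        if isinstance(value, dict):
            flat.update(flatten(value, path))
        else:
            flat["/".join(path)] = value
    return flat


# Hypothetical parameter tree standing in for a loaded checkpoint.
params = {"encoder": {"mlp": {"wi": {"kernel": np.zeros((512, 2048))}}}}
# Flax stores dense kernels as (in, out); torch.nn.Linear expects (out, in), hence .T
state_dict = {name: leaf.T for name, leaf in flatten(params).items()}
print({name: tensor.shape for name, tensor in state_dict.items()})
```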
'''simple docstring''' import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase__ ( _A ): a : Dict = {} a : Tuple = tokenizer(example['content'] , truncation=_A )['input_ids'] a : Any = len(example['content'] ) / len(output['input_ids'] ) return output lowerCAmelCase: Dict = HfArgumentParser(PretokenizationArguments) lowerCAmelCase: str = parser.parse_args() if args.num_workers is None: lowerCAmelCase: Tuple = multiprocessing.cpu_count() lowerCAmelCase: Any = AutoTokenizer.from_pretrained(args.tokenizer_dir) lowerCAmelCase: List[str] = time.time() lowerCAmelCase: Tuple = load_dataset(args.dataset_name, split='train') print(F"Dataset loaded in {time.time()-t_start:.2f}s") lowerCAmelCase: List[str] = time.time() lowerCAmelCase: Union[str, Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ 'repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated', ], ) print(F"Dataset tokenized in {time.time()-t_start:.2f}s") lowerCAmelCase: List[str] = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
297
'''simple docstring''' from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class a__( nn.Module ): def __init__( self : Any , __snake_case : int = 16 , __snake_case : int = 88 , __snake_case : Optional[int] = None , __snake_case : int = 1 , __snake_case : float = 0.0 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : Optional[int] = None , ): super().__init__() a : Optional[int] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__snake_case , attention_head_dim=__snake_case , in_channels=__snake_case , num_layers=__snake_case , dropout=__snake_case , norm_num_groups=__snake_case , cross_attention_dim=__snake_case , attention_bias=__snake_case , sample_size=__snake_case , num_vector_embeds=__snake_case , activation_fn=__snake_case , num_embeds_ada_norm=__snake_case , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference a : Union[str, Any] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` a : Tuple = [77, 2_57] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` a : Any = [1, 0] def lowercase_ ( self : str , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Dict=None , __snake_case : bool = True , ): a : Dict = hidden_states a : Tuple = [] a : Optional[int] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens a : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] a : Tuple = self.transformer_index_for_condition[i] a : Union[str, Any] = self.transformers[transformer_index]( __snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case , cross_attention_kwargs=__snake_case , return_dict=__snake_case , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] a : Optional[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) a : int = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__snake_case )
297
1
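The second blob in the record above blends two transformer branches: the conditioning sequence is split by `condition_lengths`, each segment is routed to one of the two sub-models, and only each branch's residual is kept before blending with `mix_ratio` and adding the input back. A hedged numpy sketch of that control flow, with plain functions standing in for the transformer modules:

```python
import numpy as np


def dual_forward(hidden, conditions, branches, condition_lengths=(77, 257),
                 order=(1, 0), mix_ratio=0.5):
    """Split conditioning tokens, route each segment to one branch, blend the residuals."""
    encoded, start = [], 0
    for i, length in enumerate(condition_lengths):
        segment = conditions[:, start:start + length]
        out = branches[order[i]](hidden, segment)
        encoded.append(out - hidden)          # residual w.r.t. the input states
        start += length
    mixed = encoded[0] * mix_ratio + encoded[1] * (1 - mix_ratio)
    return mixed + hidden                     # add the input states back


def branch_a(hidden, segment):                # toy stand-in for a transformer
    return hidden + segment.mean()


def branch_b(hidden, segment):
    return hidden * 1.1


hidden = np.ones((1, 4))
conditions = np.random.rand(1, 77 + 257)
print(dual_forward(hidden, conditions, (branch_a, branch_b)).shape)  # (1, 4)
```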
'''simple docstring''' from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS lowerCAmelCase: str = logging.get_logger(__name__) lowerCAmelCase: Tuple = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class a__( lowerCamelCase__ ): def __init__( self : Optional[Any] , __snake_case : List[str]=None , __snake_case : Dict=None , *__snake_case : Any , **__snake_case : Union[str, Any] ): super().__init__(*__snake_case , **__snake_case ) if config is None: assert isinstance(self.model , __snake_case ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) a : int = self.model.config else: a : Optional[Any] = config a : Any = data_args a : int = self.config.tgt_vocab_size if isinstance(self.config , __snake_case ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for""" ' padding..' 
) if self.args.label_smoothing == 0: a : Any = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss a : Tuple = label_smoothed_nll_loss def lowercase_ ( self : Union[str, Any] , __snake_case : int ): if self.optimizer is None: a : Dict = ['bias', 'LayerNorm.weight'] a : int = [ { 'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], 'weight_decay': self.args.weight_decay, }, { 'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] a : int = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: a : Dict = Adafactor a : List[str] = {'scale_parameter': False, 'relative_step': False} else: a : int = AdamW a : List[str] = { 'betas': (self.args.adam_betaa, self.args.adam_betaa), 'eps': self.args.adam_epsilon, } a : int = self.args.learning_rate if self.sharded_ddp: a : Tuple = OSS( params=__snake_case , optim=__snake_case , **__snake_case , ) else: a : Tuple = optimizer_cls(__snake_case , **__snake_case ) if self.lr_scheduler is None: a : Tuple = self._get_lr_scheduler(__snake_case ) else: # ignoring --lr_scheduler logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' ) def lowercase_ ( self : List[Any] , __snake_case : List[str] ): a : List[Any] = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": a : Dict = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": a : Optional[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: a : Dict = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__snake_case ) return scheduler def lowercase_ ( self : Tuple ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowercase_ ( self : Optional[Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : int ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token a : int = model(**__snake_case , use_cache=__snake_case )[0] a : Union[str, Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models a , a : Optional[Any] = model(**__snake_case , labels=__snake_case , use_cache=__snake_case )[:2] else: # compute label smoothed loss a : List[str] = model(**__snake_case , use_cache=__snake_case )[0] a : Any = torch.nn.functional.log_softmax(__snake_case , dim=-1 ) a , a : Optional[Any] = self.loss_fn(__snake_case , __snake_case , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def lowercase_ ( self : List[str] , __snake_case : Tuple , __snake_case : str ): a : int = inputs.pop('labels' ) a , a : Dict = self._compute_loss(__snake_case , __snake_case , __snake_case ) return loss def lowercase_ ( self : Tuple , __snake_case : nn.Module , __snake_case : Dict[str, Union[torch.Tensor, Any]] , __snake_case : bool , __snake_case : 
Optional[List[str]] = None , ): a : str = self._prepare_inputs(__snake_case ) a : Optional[Any] = { 'max_length': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: a : int = self.model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **__snake_case , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: a : Union[str, Any] = self._pad_tensors_to_max_len(__snake_case , gen_kwargs['max_length'] ) a : Tuple = inputs.pop('labels' ) with torch.no_grad(): # compute loss on predict data a , a : int = self._compute_loss(__snake_case , __snake_case , __snake_case ) a : List[str] = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) a : Tuple = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: a : Optional[Any] = self._pad_tensors_to_max_len(__snake_case , gen_kwargs['max_length'] ) return (loss, logits, labels) def lowercase_ ( self : Any , __snake_case : Dict , __snake_case : List[Any] ): # If PAD token is not defined at least EOS token has to be defined a : Any = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( 'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be' F""" padded to `max_length`={max_length}""" ) a : str = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) a : Union[str, Any] = tensor return padded_tensor
297
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase: Union[str, Any] = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: List[Any] = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Any = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase: Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
1
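One self-contained piece of the trainer above is `_pad_tensors_to_max_len`, which right-pads generated token ids with the pad (or EOS) id so every sequence reaches `max_length` before predictions and labels are compared. A numpy stand-in, assuming rank-2 integer tensors; the sample ids are made up:

```python
import numpy as np


def pad_tensors_to_max_len(tensor, max_length, pad_token_id):
    """Right-pad generated ids so every row reaches max_length (numpy stand-in)."""
    if pad_token_id is None:
        raise ValueError("pad_token_id (or eos_token_id) must be defined to pad to max_length")
    padded = np.full((tensor.shape[0], max_length), pad_token_id, dtype=tensor.dtype)
    padded[:, : tensor.shape[-1]] = tensor
    return padded


generated = np.array([[5, 8, 2], [7, 2, 1]])
print(pad_tensors_to_max_len(generated, max_length=6, pad_token_id=1))
```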
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness lowerCAmelCase: List[str] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' lowerCAmelCase: Optional[int] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' lowerCAmelCase: Optional[int] = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' lowerCAmelCase: Any = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' lowerCAmelCase: List[str] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__( datasets.Metric ): def lowercase_ ( self : Union[str, Any] ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def lowercase_ ( self : Dict , __snake_case : int , __snake_case : List[Any] , __snake_case : int=[1, 10, 1_00] , __snake_case : int=4 , __snake_case : List[str]=3.0 ): if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.' 
) with ThreadPoolExecutor(max_workers=__snake_case ) as executor: a : int = [] a : List[Any] = Counter() a : Union[str, Any] = 0 a : Dict = defaultdict(__snake_case ) for task_id, (candidates, test_case) in enumerate(zip(__snake_case , __snake_case ) ): for candidate in candidates: a : Optional[int] = candidate + '\n' + test_case a : Tuple = (test_program, timeout, task_id, completion_id[task_id]) a : List[str] = executor.submit(__snake_case , *__snake_case ) futures.append(__snake_case ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__snake_case ): a : Dict = future.result() results[result["task_id"]].append((result['completion_id'], result) ) a , a : Tuple = [], [] for result in results.values(): result.sort() a : str = [r[1]['passed'] for r in result] total.append(len(__snake_case ) ) correct.append(sum(__snake_case ) ) a : Dict = np.array(__snake_case ) a : Any = np.array(__snake_case ) a : Optional[Any] = k a : Tuple = {F"""pass@{k}""": estimate_pass_at_k(__snake_case , __snake_case , __snake_case ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCamelCase__ ( _A , _A , _A ): def estimator(_A , _A , _A ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(_A , _A ): a : List[str] = itertools.repeat(_A , len(_A ) ) else: assert len(_A ) == len(_A ) a : Union[str, Any] = iter(_A ) return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] )
297
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase: str = { 'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'], 'processing_mgp_str': ['MgpstrProcessor'], 'tokenization_mgp_str': ['MgpstrTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase: Optional[Any] = [ 'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST', 'MgpstrModel', 'MgpstrPreTrainedModel', 'MgpstrForSceneTextRecognition', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCAmelCase: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
1
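The `estimator` closure at the end of the `code_eval` record computes the unbiased pass@k from Chen et al. (2021), pass@k = 1 - C(n-c, k)/C(n, k), evaluated in the numerically stable product form rather than with binomial coefficients. A small sketch with made-up counts, where n is the number of generated candidates per task and c the number that pass the tests:

```python
import numpy as np


def pass_at_k(n, c, k):
    """Unbiased pass@k: 1 - C(n-c, k) / C(n, k), in the stable product form."""
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))


# Made-up counts: 10 candidates generated for one task, 3 of them pass the tests.
print(pass_at_k(n=10, c=3, k=1))  # 0.3
print(pass_at_k(n=10, c=3, k=5))  # ~0.917
```

In the metric above, this per-task value is computed from the per-task totals and correct counts, then averaged across tasks to give each `pass@k` entry.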
'''simple docstring'''
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    # Classic Fisher-Yates: walk from the end, swapping each element
    # with a uniformly chosen position at or before it.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
297
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCAmelCase: Dict = logging.get_logger(__name__) lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase: List[Any] = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } lowerCAmelCase: str = { 'allenai/led-base-16384': 1_6_3_8_4, } class a__( lowerCamelCase__ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Dict="replace" , __snake_case : int="<s>" , __snake_case : Any="</s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : int="<mask>" , __snake_case : int=False , __snake_case : str=True , **__snake_case : Tuple , ): super().__init__( __snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , ) a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : List[Any] = getattr(__snake_case , pre_tok_state.pop('type' ) ) a : Optional[Any] = add_prefix_space a : Optional[Any] = pre_tok_class(**__snake_case ) a : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a : Dict = 'post_processor' a : int = getattr(self.backend_tokenizer , __snake_case , __snake_case ) if tokenizer_component_instance: a : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a : Any = tuple(state['sep'] ) if "cls" in state: a : Any = tuple(state['cls'] ) a : Optional[Any] = False if state.get('add_prefix_space' , __snake_case ) != add_prefix_space: a : Any = add_prefix_space a : Optional[Any] = True if state.get('trim_offsets' , __snake_case ) != trim_offsets: a : List[Any] = trim_offsets a : Union[str, Any] = True if changes_to_apply: a : int = getattr(__snake_case , state.pop('type' ) ) a : List[Any] = component_class(**__snake_case ) setattr(self.backend_tokenizer , __snake_case , __snake_case ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def lowercase_ ( self : Dict ): if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self : Dict , __snake_case : List[str] ): a : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value a : Optional[int] = value def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ): a : Dict = kwargs.get('is_split_into_words' , __snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*__snake_case , **__snake_case ) def lowercase_ ( self : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : List[str] ): a : Optional[int] = kwargs.get('is_split_into_words' , __snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._encode_plus(*__snake_case , **__snake_case ) def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ): a : Union[str, Any] = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : int=None ): a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ): a : int = [self.sep_token_id] a : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ): a : Optional[Any] = super()._pad( encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , ) # Load from model defaults if return_attention_mask is None: a : str = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a : Any = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. a : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case ) if needs_to_be_padded: a : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a : Dict = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": a : Union[str, Any] = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
297
1
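The LED tokenizer's `_pad` override above extends `global_attention_mask` alongside the usual padding, filling with `-1` because `0` already means "local attention" in that mask. A plain-Python sketch of just that step, with a hypothetical two-example batch:

```python
def pad_global_attention_mask(batch, max_len, padding_side="right"):
    """Pad each global_attention_mask with -1, since 0 already means local attention."""
    padded = []
    for mask in batch:
        difference = max_len - len(mask)
        if padding_side == "right":
            padded.append(mask + [-1] * difference)
        elif padding_side == "left":
            padded.append([-1] * difference + mask)
        else:
            raise ValueError("Invalid padding strategy:" + str(padding_side))
    return padded


print(pad_global_attention_mask([[1, 0, 0], [1, 0, 0, 0, 1]], max_len=5))
```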
'''simple docstring''' from ..utils import DummyObject, requires_backends class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Tuple , *__snake_case : Any , **__snake_case : List[Any] ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : Dict , *__snake_case : Optional[int] , **__snake_case : Tuple ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : Optional[int] , *__snake_case : Optional[Any] , **__snake_case : Any ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Tuple , *__snake_case : Any , **__snake_case : int ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : Optional[int] , *__snake_case : List[Any] , **__snake_case : Tuple ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *__snake_case : List[str] , **__snake_case : List[Any] ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Union[str, Any] , *__snake_case : Tuple , **__snake_case : Optional[Any] ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : List[Any] , *__snake_case : Dict , **__snake_case : str ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : str , *__snake_case : List[Any] , **__snake_case : str ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Optional[int] , *__snake_case : List[Any] , **__snake_case : Optional[int] ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : Tuple , *__snake_case : int , **__snake_case : List[str] ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : List[str] , *__snake_case : Optional[Any] , **__snake_case : Optional[Any] ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Optional[int] , *__snake_case : List[Any] , **__snake_case : List[Any] ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : int , *__snake_case : List[str] , **__snake_case : Union[str, Any] ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : str , *__snake_case : Dict , **__snake_case : int ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Tuple , *__snake_case : Any , **__snake_case : Any ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : Any , *__snake_case : Dict , **__snake_case : Union[str, Any] ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : Any , *__snake_case : Tuple , **__snake_case : Optional[int] ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : str , *__snake_case : List[Any] , **__snake_case : str ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : str , *__snake_case : Tuple , **__snake_case : Dict ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : List[Any] , *__snake_case : Optional[Any] , **__snake_case : Union[str, Any] ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : List[Any] , *__snake_case : List[Any] , **__snake_case : List[str] ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : 
Any , *__snake_case : List[Any] , **__snake_case : int ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : Any , *__snake_case : Union[str, Any] , **__snake_case : Any ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : List[Any] , *__snake_case : Union[str, Any] , **__snake_case : Dict ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : Dict , *__snake_case : Optional[Any] , **__snake_case : Optional[Any] ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : int , *__snake_case : Dict , **__snake_case : int ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Optional[Any] , *__snake_case : Optional[Any] , **__snake_case : str ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : List[Any] , *__snake_case : Optional[int] , **__snake_case : Any ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : List[str] , *__snake_case : int , **__snake_case : Optional[Any] ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Optional[int] , *__snake_case : Optional[Any] , **__snake_case : List[str] ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : Any , *__snake_case : Union[str, Any] , **__snake_case : Optional[Any] ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : Dict , *__snake_case : Dict , **__snake_case : Dict ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Tuple , *__snake_case : Dict , **__snake_case : Optional[int] ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : Optional[int] , *__snake_case : Union[str, Any] , **__snake_case : Tuple ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : Optional[Any] , *__snake_case : List[str] , **__snake_case : Any ): requires_backends(cls , ['flax'] ) class a__( metaclass=lowerCamelCase__ ): lowercase__ = ["""flax"""] def __init__( self : Optional[int] , *__snake_case : List[Any] , **__snake_case : str ): requires_backends(self , ['flax'] ) @classmethod def lowercase_ ( cls : Dict , *__snake_case : List[Any] , **__snake_case : List[Any] ): requires_backends(cls , ['flax'] ) @classmethod def lowercase_ ( cls : Tuple , *__snake_case : Tuple , **__snake_case : Any ): requires_backends(cls , ['flax'] )
297
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a__: def __init__( self : Tuple ): a : Optional[int] = '' a : Optional[Any] = '' a : str = [] a : int = 0 a : str = 2_56 a : Union[str, Any] = 0 a : Any = 0 a : Optional[int] = 0 a : List[str] = 0 def lowercase_ ( self : str , __snake_case : str ): a : Any = cva.imread(__snake_case , 0 ) a : Optional[Any] = copy.deepcopy(self.img ) a , a , a : int = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' ) a : Optional[int] = np.sum(__snake_case ) for i in range(len(__snake_case ) ): a : Optional[Any] = x[i] / self.k self.sk += prk a : str = (self.L - 1) * self.sk if self.rem != 0: a : Optional[int] = int(last % last ) a : int = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__snake_case ) a : str = int(np.ma.count(self.img ) / self.img[1].size ) a : Optional[int] = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a : Any = self.img[j][i] if num != self.last_list[num]: a : str = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def lowercase_ ( self : Dict ): plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def lowercase_ ( self : List[Any] ): cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCAmelCase: Optional[Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg') lowerCAmelCase: Tuple = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
297
1
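The histogram-stretching class in the record above is performing classic histogram equalization: each gray level r_k is mapped through the scaled cumulative distribution, s_k = (L - 1) * sum_{j<=k} p(r_j), with the result rounded to the nearest level. A numpy sketch of the same mapping without the OpenCV and matplotlib plumbing; the toy image is made up:

```python
import numpy as np


def equalize(img, levels=256):
    """Map each gray level through the scaled CDF: s_k = (L - 1) * sum_{j<=k} p(r_j)."""
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = np.cumsum(hist) / img.size                     # running sum of p(r_j)
    lut = np.rint((levels - 1) * cdf).astype(np.uint8)   # round, like the rem >= 0.5 branch
    return lut[img]


img = np.random.randint(80, 120, size=(8, 8), dtype=np.uint8)  # low-contrast toy image
out = equalize(img)
print(img.min(), img.max(), "->", out.min(), out.max())
```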
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowerCAmelCase: Optional[int] = datasets.utils.logging.get_logger(__name__) lowerCAmelCase: Optional[int] = ['names', 'prefix'] lowerCAmelCase: Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowerCAmelCase: List[str] = ['encoding_errors', 'on_bad_lines'] lowerCAmelCase: Tuple = ['date_format'] @dataclass class a__( datasets.BuilderConfig ): lowercase__ = "," lowercase__ = None lowercase__ = "infer" lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = True lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = False lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = True lowercase__ = True lowercase__ = False lowercase__ = True lowercase__ = None lowercase__ = "." lowercase__ = None lowercase__ = '"' lowercase__ = 0 lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = None lowercase__ = True lowercase__ = True lowercase__ = 0 lowercase__ = True lowercase__ = False lowercase__ = None lowercase__ = 1_00_00 lowercase__ = None lowercase__ = "strict" lowercase__ = "error" lowercase__ = None def lowercase_ ( self : int ): if self.delimiter is not None: a : Union[str, Any] = self.delimiter if self.column_names is not None: a : Optional[int] = self.column_names @property def lowercase_ ( self : Tuple ): a : Union[str, Any] = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __snake_case ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not 
(datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class a__( datasets.ArrowBasedBuilder ): lowercase__ = CsvConfig def lowercase_ ( self : Any ): return datasets.DatasetInfo(features=self.config.features ) def lowercase_ ( self : List[Any] , __snake_case : Tuple ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) a : str = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__snake_case , (str, list, tuple) ): a : Union[str, Any] = data_files if isinstance(__snake_case , __snake_case ): a : Dict = [files] a : Any = [dl_manager.iter_files(__snake_case ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] a : Any = [] for split_name, files in data_files.items(): if isinstance(__snake_case , __snake_case ): a : Optional[Any] = [files] a : Optional[Any] = [dl_manager.iter_files(__snake_case ) for file in files] splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={'files': files} ) ) return splits def lowercase_ ( self : List[Any] , __snake_case : pa.Table ): if self.config.features is not None: a : Any = self.config.features.arrow_schema if all(not require_storage_cast(__snake_case ) for feature in self.config.features.values() ): # cheaper cast a : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__snake_case ) else: # more expensive cast; allows str <-> int/float or str to Audio for example a : List[Any] = table_cast(__snake_case , __snake_case ) return pa_table def lowercase_ ( self : Any , __snake_case : Optional[Any] ): a : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str a : Optional[Any] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(__snake_case ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(__snake_case ) ): a : List[Any] = pd.read_csv(__snake_case , iterator=__snake_case , dtype=__snake_case , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(__snake_case ): a : Union[str, Any] = pa.Table.from_pandas(__snake_case ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__snake_case ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(__snake_case )}: {e}""" ) raise
297
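# A minimal standalone sketch of the chunked CSV -> Arrow conversion the builder
# above performs: pandas reads the file in fixed-size chunks and each chunk is
# turned into an independent Arrow table. Assumes only pandas and pyarrow are
# installed; the file path is hypothetical.
import pandas as pd
import pyarrow as pa


def iter_arrow_batches(csv_path, chunksize=10_000):
    # iterator=True + chunksize yields DataFrames of at most `chunksize` rows
    for df in pd.read_csv(csv_path, iterator=True, chunksize=chunksize):
        yield pa.Table.from_pandas(df)


# usage (hypothetical file):
# for table in iter_arrow_batches('data/train.csv'):
#     print(table.num_rows)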
'''simple docstring''' import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class a__: def __init__( self : List[Any] , __snake_case : Union[str, Any] ): if isinstance(__snake_case , __snake_case ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden a : str = deepcopy(__snake_case ) elif os.path.exists(__snake_case ): with io.open(__snake_case , 'r' , encoding='utf-8' ) as f: a : Optional[Any] = json.load(__snake_case ) else: try: a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' ) a : Union[str, Any] = json.loads(__snake_case ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) a : List[str] = config self.set_stage_and_offload() def lowercase_ ( self : List[str] ): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. a : Dict = self.get_value('zero_optimization.stage' , -1 ) # offload a : str = False if self.is_zeroa() or self.is_zeroa(): a : Union[str, Any] = set(['cpu', 'nvme'] ) a : Optional[Any] = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: a : List[str] = True def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ): a : str = self.config # find the config node of interest if it exists a : List[str] = ds_key_long.split('.' ) a : Dict = nodes.pop() for node in nodes: a : List[Any] = config.get(__snake_case ) if config is None: return None, ds_key return config, ds_key def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ): a , a : List[Any] = self.find_config_node(__snake_case ) if config is None: return default return config.get(__snake_case , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ): a : Optional[Any] = self.config # find the config node of interest if it exists a : List[str] = ds_key_long.split('.' 
) for node in nodes: a : str = config a : Dict = config.get(__snake_case ) if config is None: if must_exist: raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ): a : Union[str, Any] = self.get_value(__snake_case ) return False if value is None else bool(__snake_case ) def lowercase_ ( self : Union[str, Any] , __snake_case : str ): a : Optional[Any] = self.get_value(__snake_case ) return False if value is None else not bool(__snake_case ) def lowercase_ ( self : Optional[Any] ): return self._stage == 2 def lowercase_ ( self : Union[str, Any] ): return self._stage == 3 def lowercase_ ( self : str ): return self._offload class a__: def __init__( self : Tuple , __snake_case : str ): a : Optional[Any] = engine def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ): # runs backpropagation and handles mixed precision self.engine.backward(__snake_case , **__snake_case ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : List[str] ): super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case ) a : Optional[Any] = hasattr(self.optimizer , 'overflow' ) def lowercase_ ( self : Dict , __snake_case : Dict=None ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def lowercase_ ( self : Optional[Any] ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def lowercase_ ( self : Tuple ): if self.__has_overflow__: return self.optimizer.overflow return False class a__( lowerCamelCase__ ): def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ): super().__init__(__snake_case , __snake_case ) def lowercase_ ( self : Any ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class a__: def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ): a : Optional[Any] = params a : str = lr a : List[str] = weight_decay a : str = kwargs class a__: def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ): a : Union[str, Any] = optimizer a : Any = total_num_steps a : List[str] = warmup_num_steps a : int = kwargs
297
1
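# A standalone sketch of the dotted-key lookup the DeepSpeed config wrapper above
# relies on: a key like 'zero_optimization.offload_param.device' is walked node by
# node through the nested dict. No deepspeed install required; the sample config
# is illustrative.
def get_value(config, ds_key_long, default=None):
    nodes = ds_key_long.split('.')
    leaf = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)


ds_config = {'zero_optimization': {'stage': 3, 'offload_param': {'device': 'cpu'}}}
assert get_value(ds_config, 'zero_optimization.stage') == 3
assert get_value(ds_config, 'zero_optimization.offload_param.device') == 'cpu'
assert get_value(ds_config, 'zero_optimization.offload_optimizer.device', 'none') == 'none'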
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__( lowerCamelCase__ , unittest.TestCase ): lowercase__ = None lowercase__ = BloomTokenizerFast lowercase__ = BloomTokenizerFast lowercase__ = True lowercase__ = False lowercase__ = """tokenizer_file""" lowercase__ = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""} def lowercase_ ( self : Optional[Any] ): super().setUp() a : List[str] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Optional[Any] , **__snake_case : str ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case ) def lowercase_ ( self : Dict ): a : Dict = self.get_rust_tokenizer() a : Tuple = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] a : List[str] = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]] a : str = tokenizer.batch_encode_plus(__snake_case )['input_ids'] self.assertListEqual(__snake_case , __snake_case ) a : Optional[Any] = tokenizer.batch_decode(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) def lowercase_ ( self : Dict , __snake_case : str=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input a : List[Any] = 'This is a simple input' a : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2'] a : int = ('This is a simple input', 'This is a pair') a : Tuple = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(__snake_case , max_length=__snake_case ) tokenizer_r.encode_plus(__snake_case , max_length=__snake_case ) tokenizer_r.batch_encode_plus(__snake_case , max_length=__snake_case ) tokenizer_r.encode(__snake_case , max_length=__snake_case ) tokenizer_r.batch_encode_plus(__snake_case , max_length=__snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) a : str = None # Hotfixing padding = None self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='max_length' ) # Simple input self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='max_length' ) # Simple input self.assertRaises( __snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='max_length' , ) # Pair input self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='max_length' ) # Pair input self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='max_length' ) # Pair input self.assertRaises( __snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='max_length' , ) def lowercase_ ( self : Dict ): a : Any = self.get_rust_tokenizer() a : List[Any] = load_dataset('xnli' , 'all_languages' , split='test' , 
streaming=__snake_case ) a : Optional[Any] = next(iter(__snake_case ) )['premise'] # pick up one data a : Dict = list(sample_data.values() ) a : List[Any] = list(map(tokenizer.encode , __snake_case ) ) a : Optional[int] = [tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case ) for x in output_tokens] self.assertListEqual(__snake_case , __snake_case ) def lowercase_ ( self : Any ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
297
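# A hedged usage sketch of the round-trip the tokenizer test above asserts.
# Assumes `transformers` is installed and the bigscience/tokenizer checkpoint
# can be downloaded; the texts are the ones hard-coded in the test.
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
texts = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
ids = tokenizer.batch_encode_plus(texts)['input_ids']
assert tokenizer.batch_decode(ids) == texts  # decode inverts encode exactly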
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowerCAmelCase: int = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class a__( unittest.TestCase ): def lowercase_ ( self : int , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ): a : Optional[int] = None a : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) a : List[str] = os.path.abspath('examples' ) for item in os.listdir(__snake_case ): if item not in EXCLUDE_EXAMPLES: a : int = os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ) and ".py" in item_path: with self.subTest( tested_script=__snake_case , feature_script=__snake_case , tested_section='main()' if parser_only else 'training_function()' , ): a : List[Any] = compare_against_test( os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case ) a : Union[str, Any] = '\n'.join(__snake_case ) if special_strings is not None: for string in special_strings: a : Union[str, Any] = diff.replace(__snake_case , '' ) self.assertEqual(__snake_case , '' ) def lowercase_ ( self : Optional[Any] ): self.one_complete_example('complete_nlp_example.py' , __snake_case ) self.one_complete_example('complete_nlp_example.py' , __snake_case ) def lowercase_ ( self : Any ): a : Dict = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) a : int = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case ) self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class a__( lowerCamelCase__ ): lowercase__ = False @classmethod def lowercase_ ( cls : Optional[int] ): super().setUpClass() a : List[str] = tempfile.mkdtemp() a : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) a : Optional[int] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def lowercase_ ( cls : Optional[int] ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def lowercase_ ( self : Tuple ): a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def lowercase_ ( self : Dict ): a : Union[str, Any] = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 
--output_dir {self.tmpdir} """.split() a : int = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def lowercase_ ( self : Any ): a : Tuple = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} """.split() a : int = run_command(self._launch_args + testargs , return_stdout=__snake_case ) self.assertNotIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) def lowercase_ ( self : int ): a : Optional[int] = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} """.split() a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__snake_case ) if torch.cuda.is_available(): a : Any = torch.cuda.device_count() else: a : str = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) else: self.assertIn('epoch 0:' , __snake_case ) self.assertIn('epoch 1:' , __snake_case ) @slow def lowercase_ ( self : Tuple ): a : Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): a : Any = run_command(self._launch_args + testargs , return_stdout=__snake_case ) a : Optional[Any] = re.findall('({.+})' , __snake_case ) a : str = [r for r in results if 'accuracy' in r][-1] a : str = ast.literal_eval(__snake_case ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def lowercase_ ( self : Optional[int] ): a : int = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def lowercase_ ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: a : Optional[Any] = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__snake_case , 'tracking' ) ) ) def lowercase_ ( self : List[str] ): a : Optional[Any] = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def lowercase_ ( self : int ): a : Optional[Any] = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
297
1
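# A sketch of the launch pattern the example tests above use: write a default
# accelerate config once, then prefix every example script with it. Assumes
# `accelerate` is installed; the script path is one of the repo's examples.
import os
import subprocess
import tempfile

from accelerate.utils import write_basic_config

tmpdir = tempfile.mkdtemp()
config_path = os.path.join(tmpdir, 'default_config.yml')
write_basic_config(save_location=config_path)
launch_args = ['accelerate', 'launch', '--config_file', config_path]
# subprocess.run(launch_args + ['examples/by_feature/checkpointing.py',
#                               '--checkpointing_steps', 'epoch',
#                               '--output_dir', tmpdir])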
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_bigbird_pegasus': [
        'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BigBirdPegasusConfig',
        'BigBirdPegasusOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_bigbird_pegasus'] = [
        'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BigBirdPegasusForCausalLM',
        'BigBirdPegasusForConditionalGeneration',
        'BigBirdPegasusForQuestionAnswering',
        'BigBirdPegasusForSequenceClassification',
        'BigBirdPegasusModel',
        'BigBirdPegasusPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    # register the lazy module so attribute access triggers the real import
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
297
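# A hedged usage note for the lazy import structure above: importing the package
# is cheap, and the torch-backed classes are only materialized on first attribute
# access through `_LazyModule`. Assumes `transformers` and `torch` are installed.
from transformers import BigBirdPegasusConfig

config = BigBirdPegasusConfig()
print(config.model_type)  # -> 'bigbird_pegasus'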
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class a__( lowerCamelCase__ ): def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ): a : Union[str, Any] = tokenizer a : Union[str, Any] = dataset a : Any = len(__snake_case ) if n_tasks is None else n_tasks a : List[str] = n_copies def __iter__( self : str ): a : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() ) a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class a__( lowerCamelCase__ ): def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ): a : Dict = start_length a : Dict = eof_strings a : str = tokenizer def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ): a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__snake_case ) def lowerCamelCase__ ( _A ): a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ): a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_A ) ): with torch.no_grad(): a : Optional[Any] = batch['ids'].shape[-1] a : Optional[Any] = accelerator.unwrap_model(_A ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A ) # each task is generated batch_size times a : Tuple = batch['task_id'].repeat(_A ) a : List[Any] = accelerator.pad_across_processes( _A , dim=1 , pad_index=tokenizer.pad_token_id ) a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) a : List[str] = generated_tokens.cpu().numpy() a : int = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_A , _A ): gen_token_dict[task].append(_A ) a : Any = [[] for _ in range(_A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) code_gens[task].append(remove_last_block(_A ) ) return code_gens def lowerCamelCase__ ( ): # Setup configuration a : Dict = HfArgumentParser(_A ) a : Any = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a : List[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice 
with multiprocessing a : int = 'false' if args.num_workers is None: a : Dict = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a : List[Any] = Accelerator() set_seed(args.seed , device_specific=_A ) # Load model and tokenizer a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) a : str = tokenizer.eos_token a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a : Optional[Any] = { 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ), } # Load evaluation dataset and metric a : Optional[int] = load_dataset('openai_humaneval' ) a : Optional[Any] = load_metric('code_eval' ) a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) a : Optional[Any] = args.n_samples // args.batch_size a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A ) # do not confuse args.batch_size, which is actually the num_return_sequences a : int = DataLoader(_A , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a : int = code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`' ' flag to enable code evaluation.' ) raise exception a , a : int = accelerator.prepare(_A , _A ) a : int = complete_code( _A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , ) if accelerator.is_main_process: a : List[str] = [] for task in tqdm(range(_A ) ): a : int = human_eval['test'][task]['test'] a : int = f"""check({human_eval["test"][task]["entry_point"]})""" references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric a , a : Tuple = code_eval_metric.compute( references=_A , predictions=_A , num_workers=args.num_workers ) print(f"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(_A , _A ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
297
1
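# A standalone sketch of the `remove_last_block` post-processing above: the model
# keeps generating after the target function, so the completion is cut at the
# first string that starts a new top-level block (\nclass, \ndef, \nprint, ...).
import re

EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']


def remove_last_block(string):
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS), string)
    # drop the final delimiter and whatever the model generated after it
    return ''.join(string_list[:-2])


completion = '    return x + 1\n\nprint(check(candidate))'
print(repr(remove_last_block(completion)))  # -> '    return x + 1\n'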
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__) # TODO Update this lowerCAmelCase: Union[str, Any] = { 'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json', # See all ESM models at https://huggingface.co/models?filter=esm } class a__( lowerCamelCase__ ): lowercase__ = """esm""" def __init__( self : int , __snake_case : str=None , __snake_case : Union[str, Any]=None , __snake_case : str=None , __snake_case : Tuple=7_68 , __snake_case : Optional[Any]=12 , __snake_case : Dict=12 , __snake_case : str=30_72 , __snake_case : List[Any]=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : Any=10_26 , __snake_case : Any=0.02 , __snake_case : str=1e-1_2 , __snake_case : Optional[Any]="absolute" , __snake_case : Any=True , __snake_case : List[str]=None , __snake_case : Optional[int]=False , __snake_case : Dict=False , __snake_case : Any=None , __snake_case : Tuple=None , **__snake_case : Optional[int] , ): super().__init__(pad_token_id=__snake_case , mask_token_id=__snake_case , **__snake_case ) a : Tuple = vocab_size a : str = hidden_size a : Optional[int] = num_hidden_layers a : List[Any] = num_attention_heads a : Dict = intermediate_size a : List[str] = hidden_dropout_prob a : Optional[Any] = attention_probs_dropout_prob a : Any = max_position_embeddings a : Any = initializer_range a : Union[str, Any] = layer_norm_eps a : Any = position_embedding_type a : Optional[Any] = use_cache a : Dict = emb_layer_norm_before a : Optional[Any] = token_dropout a : str = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' ) a : Optional[Any] = EsmFoldConfig() elif isinstance(__snake_case , __snake_case ): a : Tuple = EsmFoldConfig(**__snake_case ) a : Optional[int] = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) a : str = get_default_vocab_list() else: a : Tuple = vocab_list else: a : Union[str, Any] = None a : Any = None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , __snake_case ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' 
) def lowercase_ ( self : Dict ): a : List[Any] = super().to_dict() if isinstance(self.esmfold_config , __snake_case ): a : Tuple = self.esmfold_config.to_dict() return output @dataclass class a__: lowercase__ = None lowercase__ = True lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = 0 lowercase__ = True lowercase__ = False lowercase__ = 1_28 lowercase__ = None def lowercase_ ( self : Optional[int] ): if self.trunk is None: a : List[str] = TrunkConfig() elif isinstance(self.trunk , __snake_case ): a : List[str] = TrunkConfig(**self.trunk ) def lowercase_ ( self : str ): a : Dict = asdict(self ) a : Union[str, Any] = self.trunk.to_dict() return output @dataclass class a__: lowercase__ = 48 lowercase__ = 10_24 lowercase__ = 1_28 lowercase__ = 32 lowercase__ = 32 lowercase__ = 32 lowercase__ = 0 lowercase__ = 0 lowercase__ = False lowercase__ = 4 lowercase__ = 1_28 lowercase__ = None def lowercase_ ( self : Any ): if self.structure_module is None: a : Optional[int] = StructureModuleConfig() elif isinstance(self.structure_module , __snake_case ): a : Tuple = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) a : Tuple = self.sequence_state_dim // self.sequence_head_width a : Optional[Any] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def lowercase_ ( self : Tuple ): a : List[str] = asdict(self ) a : int = self.structure_module.to_dict() return output @dataclass class a__: lowercase__ = 3_84 lowercase__ = 1_28 lowercase__ = 16 lowercase__ = 1_28 lowercase__ = 12 lowercase__ = 4 lowercase__ = 8 lowercase__ = 0.1 lowercase__ = 8 lowercase__ = 1 lowercase__ = 2 lowercase__ = 7 lowercase__ = 10 lowercase__ = 1E-8 lowercase__ = 1E5 def lowercase_ ( self : Optional[Any] ): return asdict(self ) def lowerCamelCase__ ( ): return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
297
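# A minimal sketch of the nested-config pattern the ESM config above uses: parent
# dataclasses lazily build their child configs and serialize them recursively in
# to_dict. The field names here are illustrative, not the real ESMFold fields.
from dataclasses import asdict, dataclass, field


@dataclass
class ChildConfig:
    width: int = 128


@dataclass
class ParentConfig:
    depth: int = 48
    child: ChildConfig = field(default_factory=ChildConfig)

    def to_dict(self):
        output = asdict(self)  # asdict recurses into nested dataclasses
        return output


print(ParentConfig().to_dict())  # -> {'depth': 48, 'child': {'width': 128}}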
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def lowerCamelCase__ ( _A , _A , _A ): if isinstance(_A , torch.Tensor ): return image elif isinstance(_A , PIL.Image.Image ): a : Any = [image] if isinstance(image[0] , PIL.Image.Image ): a : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] a : int = np.concatenate(_A , axis=0 ) a : int = np.array(_A ).astype(np.floataa ) / 255.0 a : str = image.transpose(0 , 3 , 1 , 2 ) a : str = 2.0 * image - 1.0 a : Optional[int] = torch.from_numpy(_A ) elif isinstance(image[0] , torch.Tensor ): a : Optional[Any] = torch.cat(_A , dim=0 ) return image def lowerCamelCase__ ( _A , _A , _A , _A=0.9995 ): if not isinstance(_A , np.ndarray ): a : Dict = True a : Optional[Any] = va.device a : Optional[int] = va.cpu().numpy() a : Union[str, Any] = va.cpu().numpy() a : Any = np.sum(va * va / (np.linalg.norm(_A ) * np.linalg.norm(_A )) ) if np.abs(_A ) > DOT_THRESHOLD: a : Any = (1 - t) * va + t * va else: a : Any = np.arccos(_A ) a : Tuple = np.sin(_A ) a : Optional[Any] = theta_a * t a : List[Any] = np.sin(_A ) a : Dict = np.sin(theta_a - theta_t ) / sin_theta_a a : int = sin_theta_t / sin_theta_a a : Any = sa * va + sa * va if inputs_are_torch: a : Dict = torch.from_numpy(_A ).to(_A ) return va def lowerCamelCase__ ( _A , _A ): a : Optional[int] = F.normalize(_A , dim=-1 ) a : str = F.normalize(_A , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def lowerCamelCase__ ( _A , _A ): for param in model.parameters(): a : int = value class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ): super().__init__() self.register_modules( vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , ) a : Optional[Any] = ( feature_extractor.size if isinstance(feature_extractor.size , __snake_case ) else feature_extractor.size['shortest_edge'] ) a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , __snake_case ) set_requires_grad(self.clip_model , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__snake_case ) def 
lowercase_ ( self : Union[str, Any] ): self.enable_attention_slicing(__snake_case ) def lowercase_ ( self : Optional[Any] ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : Tuple ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : int ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : Union[str, Any] ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ): # get the original timestep using init_timestep a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case ) a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 ) a : List[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ): if not isinstance(__snake_case , torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" ) a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case ) if isinstance(__snake_case , __snake_case ): a : Optional[int] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case ) ] a : Optional[Any] = torch.cat(__snake_case , dim=0 ) else: a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : List[str] = 0.18215 * init_latents a : str = init_latents.repeat_interleave(__snake_case , dim=0 ) a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case ) # get latents a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case ) a : int = init_latents return latents def lowercase_ ( self : List[str] , __snake_case : Dict ): a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ): a : List[Any] = self.feature_extractor.preprocess(__snake_case ) a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() a : int = self.clip_model.get_image_features(__snake_case ) a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 ) return image_embeddings_clip @torch.enable_grad() def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ): a : Optional[Any] = latents.detach().requires_grad_() a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, 
DPMSolverMultistepScheduler) ): a : int = self.scheduler.alphas_cumprod[timestep] a : Any = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 a : Tuple = torch.sqrt(__snake_case ) a : str = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , __snake_case ): a : List[Any] = self.scheduler.sigmas[index] a : Optional[int] = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Union[str, Any] = 1 / 0.18215 * sample a : str = self.vae.decode(__snake_case ).sample a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case ) a : List[str] = self.normalize(__snake_case ).to(latents.dtype ) a : List[str] = self.clip_model.get_image_features(__snake_case ) a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0] if isinstance(self.scheduler , __snake_case ): a : List[Any] = latents.detach() + grads * (sigma**2) a : Optional[int] = noise_pred_original else: a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ): if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(__snake_case , torch.Generator ) and batch_size > 1: a : Dict = [generator] + [None] * (batch_size - 1) a : Any = [ ('model', self.coca_model is None), ('tokenizer', self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] a : List[str] = [x[0] for x in coca_is_none if x[1]] a : List[str] = ', '.join(__snake_case ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(__snake_case ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) a : int = self.get_image_description(__snake_case ) if style_prompt is None: if len(__snake_case ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) 
a : Union[str, Any] = self.get_image_description(__snake_case ) # get prompt text embeddings for content and style a : Optional[Any] = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] a : Dict = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] a : Any = slerp(__snake_case , __snake_case , __snake_case ) # duplicate text embeddings for each generation per prompt a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 ) # set timesteps a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) a : Any = {} if accepts_offset: a : Optional[Any] = 1 self.scheduler.set_timesteps(__snake_case , **__snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device ) a : Optional[int] = timesteps[:1].repeat(__snake_case ) # Preprocess image a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case ) a : List[Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : str = preprocess(__snake_case , __snake_case , __snake_case ) a : Union[str, Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case ) if clip_guidance_scale > 0: a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : int = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : List[str] = slerp( __snake_case , __snake_case , __snake_case ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. a : int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: a : Any = content_text_input.input_ids.shape[-1] a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' ) a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a : Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) a : List[str] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to( self.device ) else: a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) a : List[str] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a : Union[str, Any] = {} if accepts_eta: a : List[str] = eta # check if the scheduler accepts generator a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: a : Any = generator with self.progress_bar(total=__snake_case ): for i, t in enumerate(__snake_case ): # expand the latents if we are doing classifier free guidance a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample # perform classifier free guidance if do_classifier_free_guidance: a , a : List[str] = noise_pred.chunk(2 ) a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: a : Optional[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) a , a : Union[str, Any] = self.cond_fn( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # compute the previous noisy sample x_t -> x_t-1 a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Tuple = 1 / 0.18215 * latents a : Optional[int] = self.vae.decode(__snake_case ).sample a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a : str = self.numpy_to_pil(__snake_case ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
297
1
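# A numpy-only sketch of the spherical interpolation (`slerp`) the pipeline above
# uses to blend content and style embeddings; same math, no torch required.
import numpy as np


def slerp(t, v0, v1, dot_threshold=0.9995):
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        # nearly parallel vectors: plain linear interpolation is stable enough
        return (1 - t) * v0 + t * v1
    theta_0 = np.arccos(dot)
    sin_theta_0 = np.sin(theta_0)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / sin_theta_0
    s1 = np.sin(theta_t) / sin_theta_0
    return s0 * v0 + s1 * v1


a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
print(slerp(0.5, a, b))  # -> ~[0.7071, 0.7071]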
def _a(input_str: str) -> str:
    """
    Reverse the order of the words in a string.

    >>> _a("Hello World")
    'World Hello'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
0
'''simple docstring'''


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
297
0
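# A second data point for the snippet above (assuming the `hubble_parameter`
# function from that record is in scope): at redshift 1 the matter term scales
# as (1 + z)^3, so the Hubble parameter grows well above its present-day value.
print(hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
                       matter_density=0.3, dark_energy=0.7, redshift=1))
# -> ~120.3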