code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor a_ : List[Any] = logging.get_logger(__name__) class _snake_case ( A__ ): def __init__( self , *a , **a) -> None: warnings.warn( 'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ChineseCLIPImageProcessor instead.' , a , ) super().__init__(*a , **a)
73
def lowerCamelCase__ (_UpperCAmelCase = 10 , _UpperCAmelCase = 1000 , _UpperCAmelCase = True): assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError('Invalid value for min_val or max_val (min_value < max_value)') return min_val if option else max_val def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): return int((number_a + number_a) / 2) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) ), 'argument values must be type of "int"' if lower > higher: raise ValueError('argument value for lower and higher must be(lower > higher)') if not lower < to_guess < higher: raise ValueError( 'guess value must be within the range of lower and higher value') def answer(_UpperCAmelCase) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print('started...') SCREAMING_SNAKE_CASE = lower SCREAMING_SNAKE_CASE = higher SCREAMING_SNAKE_CASE = [] while True: SCREAMING_SNAKE_CASE = get_avg(_UpperCAmelCase , _UpperCAmelCase) last_numbers.append(_UpperCAmelCase) if answer(_UpperCAmelCase) == "low": SCREAMING_SNAKE_CASE = number elif answer(_UpperCAmelCase) == "high": SCREAMING_SNAKE_CASE = number else: break print(F'''guess the number : {last_numbers[-1]}''') print(F'''details : {last_numbers!s}''') def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = int(input('Enter lower value : ').strip()) SCREAMING_SNAKE_CASE = int(input('Enter high value : ').strip()) SCREAMING_SNAKE_CASE = int(input('Enter value to guess : ').strip()) guess_the_number(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if __name__ == "__main__": main()
73
1
a_ : dict[str, float] = { "joule": 1.0, "kilojoule": 10_00, "megajoule": 1_00_00_00, "gigajoule": 10_00_00_00_00, "wattsecond": 1.0, "watthour": 36_00, "kilowatthour": 3_60_00_00, "newtonmeter": 1.0, "calorie_nutr": 41_86.8, "kilocalorie_nutr": 4_18_68_00.00, "electronvolt": 1.6_02_17_66_34E-19, "britishthermalunit_it": 10_55.0_55_85, "footpound": 1.355_818, } def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: SCREAMING_SNAKE_CASE = ( F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' F'''Valid values are: {', '.join(_UpperCAmelCase)}''' ) raise ValueError(_UpperCAmelCase) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
73
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class _snake_case : def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_choices SCREAMING_SNAKE_CASE = scope def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE 
= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , ) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any: SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a) SCREAMING_SNAKE_CASE = model(a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = OpenLlamaModel(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , ) SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , ) SCREAMING_SNAKE_CASE = model(a , attention_mask=a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int: SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a) model.to(a) model.eval() # first forward pass SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , ) SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size) SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1) SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0] SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0] # select random slice SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a , a , atol=1E-3)) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( 
SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) = config_and_inputs SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _snake_case ( A__ , A__ , A__ , unittest.TestCase ): _lowercase : List[Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else () _lowercase : List[str] = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _lowercase : List[str] = False _lowercase : Optional[int] = False def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37) def SCREAMING_SNAKE_CASE__ ( self) -> str: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE = 
OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 'single_label_classification' SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 'multi_label_classification' SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test') def SCREAMING_SNAKE_CASE__ ( self) -> Any: pass @parameterized.expand([('linear',), ('dynamic',)]) def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 
ids_tensor([1, 10] , config.vocab_size) SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE = OpenLlamaModel(a) original_model.to(a) original_model.eval() SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0} SCREAMING_SNAKE_CASE = OpenLlamaModel(a) scaled_model.to(a) scaled_model.eval() SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a , a , atol=1E-5)) else: self.assertFalse(torch.allclose(a , a , atol=1E-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(a , a , atol=1E-5))
73
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ : Any = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ 'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMAEForPreTraining', 'ViTMAELayer', 'ViTMAEModel', 'ViTMAEPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ 'TFViTMAEForPreTraining', 'TFViTMAEModel', 'TFViTMAEPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys a_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
from __future__ import annotations a_ : str = [] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): for i in range(len(_UpperCAmelCase)): if board[row][i] == 1: return False for i in range(len(_UpperCAmelCase)): if board[i][column] == 1: return False for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , -1 , -1)): if board[i][j] == 1: return False for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , len(_UpperCAmelCase))): if board[i][j] == 1: return False return True def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): if row >= len(_UpperCAmelCase): solution.append(_UpperCAmelCase) printboard(_UpperCAmelCase) print() return True for i in range(len(_UpperCAmelCase)): if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = 1 solve(_UpperCAmelCase , row + 1) SCREAMING_SNAKE_CASE = 0 return False def lowerCamelCase__ (_UpperCAmelCase): for i in range(len(_UpperCAmelCase)): for j in range(len(_UpperCAmelCase)): if board[i][j] == 1: print('Q' , end=' ') else: print('.' , end=' ') print() # n=int(input("The no. of queens")) a_ : Tuple = 8 a_ : int = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
73
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=_UpperCAmelCase) SCREAMING_SNAKE_CASE = parser.add_subparsers(help='accelerate command helpers') # Register commands get_config_parser(subparsers=_UpperCAmelCase) env_command_parser(subparsers=_UpperCAmelCase) launch_command_parser(subparsers=_UpperCAmelCase) tpu_command_parser(subparsers=_UpperCAmelCase) test_command_parser(subparsers=_UpperCAmelCase) # Let's go SCREAMING_SNAKE_CASE = parser.parse_args() if not hasattr(_UpperCAmelCase , 'func'): parser.print_help() exit(1) # Run args.func(_UpperCAmelCase) if __name__ == "__main__": main()
73
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _snake_case ( A__ , A__ , unittest.TestCase ): _lowercase : List[Any] = StableDiffusionDiffEditPipeline _lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} _lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} _lowercase : List[str] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _lowercase : List[str] = frozenset([] ) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: torch.manual_seed(0) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , ) SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , ) SCREAMING_SNAKE_CASE = DDIMInverseScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , ) 
torch.manual_seed(0) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) SCREAMING_SNAKE_CASE = CLIPTextModel(a) SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') SCREAMING_SNAKE_CASE = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]: SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) , rng=random.Random(a)).to(a) SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a)).to(a) if str(a).startswith('mps'): SCREAMING_SNAKE_CASE = torch.manual_seed(a) else: SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a) SCREAMING_SNAKE_CASE = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]: SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a) SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB') if str(a).startswith('mps'): SCREAMING_SNAKE_CASE = torch.manual_seed(a) else: SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a) SCREAMING_SNAKE_CASE = { 'image': image, 
'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]: SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a) SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0] SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB') if str(a).startswith('mps'): SCREAMING_SNAKE_CASE = torch.manual_seed(a) else: SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a) SCREAMING_SNAKE_CASE = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: if not hasattr(self.pipeline_class , '_optional_components'): return SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = self.pipeline_class(**a) pipe.to(a) pipe.set_progress_bar_config(disable=a) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(a , a , a) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a) SCREAMING_SNAKE_CASE = pipe(**a)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(a) SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a) pipe_loaded.to(a) pipe_loaded.set_progress_bar_config(disable=a) for optional_component in pipe._optional_components: self.assertTrue( getattr(a , a) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a) SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0] SCREAMING_SNAKE_CASE = np.abs(output - 
output_loaded).max() self.assertLess(a , 1E-4) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = 'cpu' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = self.pipeline_class(**a) pipe.to(a) pipe.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a) SCREAMING_SNAKE_CASE = pipe.generate_mask(**a) SCREAMING_SNAKE_CASE = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16)) SCREAMING_SNAKE_CASE = np.array([0] * 9) SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max() self.assertLessEqual(a , 1E-3) self.assertEqual(mask[0, -3, -4] , 0) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = 'cpu' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = self.pipeline_class(**a) pipe.to(a) pipe.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a) SCREAMING_SNAKE_CASE = pipe.invert(**a).images SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3)) SCREAMING_SNAKE_CASE = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , ) SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(a , 1E-3) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5E-3) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = 'cpu' SCREAMING_SNAKE_CASE = self.get_dummy_components() SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a) SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a) SCREAMING_SNAKE_CASE = self.pipeline_class(**a) pipe.to(a) pipe.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a) SCREAMING_SNAKE_CASE = pipe.invert(**a).images SCREAMING_SNAKE_CASE = image[0, -1, -3:, 
-3:] self.assertEqual(image.shape , (2, 32, 32, 3)) SCREAMING_SNAKE_CASE = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , ) SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(a , 1E-3) @require_torch_gpu @slow class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self) -> Any: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def SCREAMING_SNAKE_CASE__ ( cls) -> List[Any]: SCREAMING_SNAKE_CASE = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png') SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768)) SCREAMING_SNAKE_CASE = raw_image def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = torch.manual_seed(0) SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa) SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config) SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = 'a bowl of fruit' SCREAMING_SNAKE_CASE = 'a bowl of pears' SCREAMING_SNAKE_CASE = pipe.generate_mask( image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , ) SCREAMING_SNAKE_CASE = pipe.invert( prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a).latents SCREAMING_SNAKE_CASE = pipe( prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0] SCREAMING_SNAKE_CASE = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png').resize((768, 768))) / 255 ) assert np.abs((expected_image - image).max()) < 5E-1 def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = 
torch.manual_seed(0) SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa) SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = 'a bowl of fruit' SCREAMING_SNAKE_CASE = 'a bowl of pears' SCREAMING_SNAKE_CASE = pipe.generate_mask( image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , ) SCREAMING_SNAKE_CASE = pipe.invert( prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents SCREAMING_SNAKE_CASE = pipe( prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0] SCREAMING_SNAKE_CASE = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png').resize((768, 768))) / 255 ) assert np.abs((expected_image - image).max()) < 5E-1
73
1
import itertools import math def lowerCamelCase__ (_UpperCAmelCase): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = 2 while True: if is_prime(_UpperCAmelCase): yield num num += 1 def lowerCamelCase__ (_UpperCAmelCase = 1_0001): return next(itertools.islice(prime_generator() , nth - 1 , _UpperCAmelCase)) if __name__ == "__main__": print(f"""{solution() = }""")
73
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """Configuration for a UniSpeech model.

    Parameter names and defaults restored from the (mangled) assignment
    order in the original body; defaults instantiate a config comparable
    to microsoft/unispeech-large-1500h-cv's architecture family.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Total stride of the convolutional feature extractor, i.e. how many
        # input samples map to one logit frame.
        return functools.reduce(operator.mul, self.conv_stride, 1)
73
1
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return *min_val* when *option* is truthy, otherwise *max_val*.

    Raises ValueError when min_val > max_val. Validation is kept as
    ``assert`` to preserve the original contract (AssertionError on bad types).
    """
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_a: int, number_b: int) -> int:
    """Return the integer midpoint of the two numbers (floor for positives).

    Fixed: the mangled version averaged the first argument with itself.
    """
    return int((number_a + number_b) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for *to_guess* inside (lower, higher) and print the trace.

    Raises ValueError when the bounds are inverted or the target is outside
    the open interval.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        """Compare *number* against the hidden target."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            # Midpoint below the target: raise the lower bound.
            last_lowest = number
        elif answer(number) == "high":
            # Midpoint above the target: lower the upper bound.
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Interactive entry point: read bounds and target, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
73
"""Official evaluation script for SQuAD version 2.0 (names restored from the
mangled sources; structure follows the reference evaluate-v2.0.py)."""

import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

# Populated from the command line in the __main__ block.
OPTS = None


def parse_args():
    """Parse command-line arguments; print help and exit when none are given."""
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir",
        "-p",
        metavar="out_images",
        default=None,
        help="Save precision-recall curves to directory.",
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    """Map each question id to whether it has at least one gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    """Whitespace-tokenize the normalized answer; empty input yields []."""
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    """1 if the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    """Token-level F1 between gold and predicted answers."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    """Per-question exact-match and F1 scores, taking the max over gold answers."""
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out scores where the model's no-answer probability exceeds the threshold."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicting "no answer": correct iff the question truly has none.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    """Aggregate exact/F1 scores (optionally restricted to *qid_list*) into percentages."""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric of *new_eval* into *main_eval* under ``prefix_key``."""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    # NOTE(review): `plt` is only bound when the __main__ block imports
    # matplotlib (i.e. when --out-image-dir is given) — same as the original.
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer probability threshold and compute average precision."""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves (exact, F1, oracle) and fold their AP into *main_eval*."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question ids."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold that maximizes the aggregate score."""
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Record best achievable exact/F1 and the thresholds achieving them."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    """Run the full evaluation using the globally-parsed OPTS."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
73
1
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of *nums* whose elements sum to *max_sum*.

    Subsets are returned as lists, in depth-first index order.
    """
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """DFS helper: extend *path* with elements from nums[num_index:].

    Prunes when the partial sum overshoots *max_sum* (or cannot reach it);
    appends complete solutions to *result*.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        # [*path, nums[index]] builds a fresh list, so siblings don't share state.
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


if __name__ == "__main__":
    # Demo moved under a __main__ guard so importing the module has no side effects.
    nums = [3, 34, 4, 12, 5, 2]
    max_sum = 9
    result = generate_sum_of_subsets_soln(nums, max_sum)
    print(*result)
73
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of GLPNImageProcessor kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning (the mangled version passed the first positional
        # argument as the warning category, which is wrong).
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
73
1
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    """Configuration for a GPT-J model (defaults match EleutherAI/gpt-j-6B)."""

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J (decoder-only, optional past)."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Skip OnnxConfigWithPast's override and use the base implementation.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
73
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    """Tests for the `text-classification` tool, local and remote variants.

    The mangled original named every method identically, so only the last
    one survived class creation; distinct names restore all four tests.
    """

    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
73
1
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None): if attention_mask is None: SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id) , tf.inta) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class _snake_case : _lowercase : str = OPTConfig _lowercase : Union[str, Any] = {} _lowercase : str = '''gelu''' def __init__( self , a , a=13 , a=7 , a=True , a=False , a=99 , a=16 , a=2 , a=4 , a=4 , a="gelu" , a=0.1 , a=0.1 , a=20 , a=2 , a=1 , a=0 , a=16 , a=16 , ) -> List[str]: SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = pad_token_id SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = word_embed_proj_dim SCREAMING_SNAKE_CASE = False def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) SCREAMING_SNAKE_CASE = 
tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1) SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=a , **self.config_updates , ) SCREAMING_SNAKE_CASE = prepare_opt_inputs_dict(a , a) return config, inputs_dict def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Union[str, Any]: SCREAMING_SNAKE_CASE = TFOPTModel(config=a) SCREAMING_SNAKE_CASE = inputs_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids[:1, :] SCREAMING_SNAKE_CASE = inputs_dict['attention_mask'][:1, :] SCREAMING_SNAKE_CASE = 1 # first forward pass SCREAMING_SNAKE_CASE = model(a , attention_mask=a , use_cache=a) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size) SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1) SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1) SCREAMING_SNAKE_CASE = model(a , attention_mask=a)[0] SCREAMING_SNAKE_CASE = model(a , attention_mask=a , past_key_values=a)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1])) SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx] 
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(a , a , rtol=1E-3) @require_tf class _snake_case ( A__ , A__ , unittest.TestCase ): _lowercase : Union[str, Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () _lowercase : Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else () _lowercase : List[Any] = ( {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {} ) _lowercase : int = False _lowercase : List[str] = False _lowercase : str = False _lowercase : List[str] = 10 def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = TFOPTModelTester(self) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(a , a): if hasattr(a , 'weight'): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. 
model.build() if hasattr(a , 'weight'): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings SCREAMING_SNAKE_CASE = model_class(config=a) SCREAMING_SNAKE_CASE = _get_word_embedding_weight(a , model.get_input_embeddings()) SCREAMING_SNAKE_CASE = _get_word_embedding_weight(a , model.get_output_embeddings()) # reshape the embeddings model.resize_token_embeddings(a) SCREAMING_SNAKE_CASE = _get_word_embedding_weight(a , model.get_input_embeddings()) SCREAMING_SNAKE_CASE = _get_word_embedding_weight(a , model.get_output_embeddings()) # check that the resized embeddings size matches the desired size. SCREAMING_SNAKE_CASE = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , a) # check that weights remain the same after resizing SCREAMING_SNAKE_CASE = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0: SCREAMING_SNAKE_CASE = False self.assertTrue(a) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , a) SCREAMING_SNAKE_CASE = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0: SCREAMING_SNAKE_CASE = False self.assertTrue(a) def lowerCamelCase__ (_UpperCAmelCase): return tf.constant(_UpperCAmelCase , dtype=tf.intaa) @require_tf class _snake_case ( unittest.TestCase ): _lowercase : List[str] = 99 def SCREAMING_SNAKE_CASE__ ( self) -> int: SCREAMING_SNAKE_CASE = tf.ones((4, 1) , dtype=tf.intaa) * 2 SCREAMING_SNAKE_CASE = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1) SCREAMING_SNAKE_CASE = input_ids.shape[0] SCREAMING_SNAKE_CASE = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , 
ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class _snake_case ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = TFOPTModel.from_pretrained('facebook/opt-350m') SCREAMING_SNAKE_CASE = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]]) SCREAMING_SNAKE_CASE = tf.not_equal(a , model.config.pad_token_id) with tf.GradientTape(): SCREAMING_SNAKE_CASE = model(input_ids=a , attention_mask=a).last_hidden_state SCREAMING_SNAKE_CASE = (1, 11, 512) self.assertEqual(output.shape , a) SCREAMING_SNAKE_CASE = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]]) self.assertTrue(np.allclose(output[:, :3, :3] , a , atol=4E-3)) SCREAMING_SNAKE_CASE = tf.function(a , jit_compile=a) SCREAMING_SNAKE_CASE = xla_generate(a , a)[0] self.assertTrue(np.allclose(output[:, :3, :3] , a , atol=4E-2)) @require_tf @slow class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self) -> Any: super().setUp() SCREAMING_SNAKE_CASE = 'facebook/opt-350m' def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(self.path_model) SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(self.path_model) SCREAMING_SNAKE_CASE = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='tf' , padding=a , add_special_tokens=a) SCREAMING_SNAKE_CASE = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1) SCREAMING_SNAKE_CASE = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, 
-10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ]) self.assertTrue(np.allclose(a , a , atol=1E-4)) SCREAMING_SNAKE_CASE = tf.function(a , jit_compile=a) SCREAMING_SNAKE_CASE = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1) self.assertTrue(np.allclose(a , a , atol=1E-4)) @require_tf @slow class _snake_case ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = 'facebook/opt-125m' SCREAMING_SNAKE_CASE = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(a) SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(a) for prompt in self.prompts: SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='tf').input_ids SCREAMING_SNAKE_CASE = model.generate(a , max_length=10) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a , skip_special_tokens=a) predicted_outputs += generated_string self.assertListEqual(a , a) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = 'facebook/opt-350m' SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(a) SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(a) SCREAMING_SNAKE_CASE = 'left' # use different length sentences to test batching SCREAMING_SNAKE_CASE = [ 'Hello, my dog is a little', 'Today, I', ] SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='tf' , padding=a) SCREAMING_SNAKE_CASE = 
inputs['input_ids'] SCREAMING_SNAKE_CASE = model.generate(input_ids=a , attention_mask=inputs['attention_mask']) SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors='tf').input_ids SCREAMING_SNAKE_CASE = model.generate(input_ids=a) SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa)) SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors='tf').input_ids SCREAMING_SNAKE_CASE = model.generate(input_ids=a , max_length=model.config.max_length - num_paddings) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a , skip_special_tokens=a) SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a) SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=a) SCREAMING_SNAKE_CASE = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(a , a) self.assertListEqual(a , [non_padded_sentence, padded_sentence]) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = 'facebook/opt-350m' SCREAMING_SNAKE_CASE = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(a) SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(a) for prompt in self.prompts: SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='tf').input_ids SCREAMING_SNAKE_CASE = model.generate(a , max_length=10) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a , skip_special_tokens=a) predicted_outputs += generated_string self.assertListEqual(a , a)
73
import sys import turtle def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): my_pen.up() my_pen.goto(vertexa[0] , vertexa[1]) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1]) my_pen.goto(vertexa[0] , vertexa[1]) my_pen.goto(vertexa[0] , vertexa[1]) if depth == 0: return triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1) triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1) triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( 'Correct format for using this script: ' 'python fractals.py <int:depth_for_fractal>' ) a_ : Any = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('red') a_ : str = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
73
1
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class _snake_case ( A__ ): _lowercase : jnp.ndarray _lowercase : jnp.ndarray class _snake_case ( nn.Module ): _lowercase : int _lowercase : Tuple[int] = (16, 32, 96, 2_56) _lowercase : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) SCREAMING_SNAKE_CASE = [] for i in range(len(self.block_out_channels) - 1): SCREAMING_SNAKE_CASE = self.block_out_channels[i] SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1] SCREAMING_SNAKE_CASE = nn.Conv( a , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(a) SCREAMING_SNAKE_CASE = nn.Conv( a , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(a) SCREAMING_SNAKE_CASE = blocks SCREAMING_SNAKE_CASE = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , a) -> Any: SCREAMING_SNAKE_CASE = self.conv_in(a) SCREAMING_SNAKE_CASE = nn.silu(a) for block in self.blocks: SCREAMING_SNAKE_CASE = block(a) SCREAMING_SNAKE_CASE = nn.silu(a) SCREAMING_SNAKE_CASE = self.conv_out(a) return embedding @flax_register_to_config class _snake_case ( nn.Module , A__ , A__ ): _lowercase : int = 32 _lowercase : int = 4 _lowercase : Tuple[str] = ( 
"CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _lowercase : Union[bool, Tuple[bool]] = False _lowercase : Tuple[int] = (3_20, 6_40, 12_80, 12_80) _lowercase : int = 2 _lowercase : Union[int, Tuple[int]] = 8 _lowercase : Optional[Union[int, Tuple[int]]] = None _lowercase : int = 12_80 _lowercase : float = 0.0 _lowercase : bool = False _lowercase : jnp.dtype = jnp.floataa _lowercase : bool = True _lowercase : int = 0 _lowercase : str = "rgb" _lowercase : Tuple[int] = (16, 32, 96, 2_56) def SCREAMING_SNAKE_CASE__ ( self , a) -> FrozenDict: # init input tensors SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size) SCREAMING_SNAKE_CASE = jnp.zeros(a , dtype=jnp.floataa) SCREAMING_SNAKE_CASE = jnp.ones((1,) , dtype=jnp.intaa) SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa) SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8) SCREAMING_SNAKE_CASE = jnp.zeros(a , dtype=jnp.floataa) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = jax.random.split(a) SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng} return self.init(a , a , a , a , a)["params"] def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.block_out_channels SCREAMING_SNAKE_CASE = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim # input SCREAMING_SNAKE_CASE = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time SCREAMING_SNAKE_CASE = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift) SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(a , dtype=self.dtype) SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) SCREAMING_SNAKE_CASE = self.only_cross_attention if isinstance(a , a): SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types) if isinstance(a , a): SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types) # down SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = block_out_channels[0] SCREAMING_SNAKE_CASE = nn.Conv( a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(a) for i, down_block_type in enumerate(self.down_block_types): SCREAMING_SNAKE_CASE = output_channel SCREAMING_SNAKE_CASE = block_out_channels[i] SCREAMING_SNAKE_CASE = i == len(a) - 1 if down_block_type == "CrossAttnDownBlock2D": SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD( in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE = FlaxDownBlockaD( in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(a) for _ in range(self.layers_per_block): 
SCREAMING_SNAKE_CASE = nn.Conv( a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(a) if not is_final_block: SCREAMING_SNAKE_CASE = nn.Conv( a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(a) SCREAMING_SNAKE_CASE = down_blocks SCREAMING_SNAKE_CASE = controlnet_down_blocks # mid SCREAMING_SNAKE_CASE = block_out_channels[-1] SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn( in_channels=a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) SCREAMING_SNAKE_CASE = nn.Conv( a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , a , a , a , a , a = 1.0 , a = True , a = False , ) -> Union[FlaxControlNetOutput, Tuple]: SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order if channel_order == "bgr": SCREAMING_SNAKE_CASE = jnp.flip(a , axis=1) # 1. time if not isinstance(a , jnp.ndarray): SCREAMING_SNAKE_CASE = jnp.array([timesteps] , dtype=jnp.intaa) elif isinstance(a , jnp.ndarray) and len(timesteps.shape) == 0: SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.floataa) SCREAMING_SNAKE_CASE = jnp.expand_dims(a , 0) SCREAMING_SNAKE_CASE = self.time_proj(a) SCREAMING_SNAKE_CASE = self.time_embedding(a) # 2. pre-process SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1)) SCREAMING_SNAKE_CASE = self.conv_in(a) SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1)) SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(a) sample += controlnet_cond # 3. 
down SCREAMING_SNAKE_CASE = (sample,) for down_block in self.down_blocks: if isinstance(a , a): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = down_block(a , a , a , deterministic=not train) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = down_block(a , a , deterministic=not train) down_block_res_samples += res_samples # 4. mid SCREAMING_SNAKE_CASE = self.mid_block(a , a , a , deterministic=not train) # 5. contronet blocks SCREAMING_SNAKE_CASE = () for down_block_res_sample, controlnet_block in zip(a , self.controlnet_down_blocks): SCREAMING_SNAKE_CASE = controlnet_block(a) controlnet_down_block_res_samples += (down_block_res_sample,) SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples SCREAMING_SNAKE_CASE = self.controlnet_mid_block(a) # 6. scaling SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=a , mid_block_res_sample=a)
73
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed a_ : Any = 'true' def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16): set_seed(42) SCREAMING_SNAKE_CASE = RegressionModel() SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase) SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase) SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase) model.to(accelerator.device) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase) return model, ddp_model, dataloader def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased') SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation') def tokenize_function(_UpperCAmelCase): SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase) return outputs with accelerator.main_process_first(): SCREAMING_SNAKE_CASE = dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels') def collate_fn(_UpperCAmelCase): if use_longest: return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt') return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt') return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16) def lowerCamelCase__ (_UpperCAmelCase , 
_UpperCAmelCase): SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase) SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches) SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] for batch in dataloader: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], [] for logit, targ in logits_and_targets: logits.append(_UpperCAmelCase) targs.append(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase) return logits, targs def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) assert ( len(_UpperCAmelCase) == num_samples ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}''' def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False): SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc') SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase) # First do 
baseline SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no'] model.to(_UpperCAmelCase) model.eval() for batch in dataloader: batch.to(_UpperCAmelCase) with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels']) SCREAMING_SNAKE_CASE = metric.compute() # Then do distributed SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1) SCREAMING_SNAKE_CASE = batch['labels'] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase) SCREAMING_SNAKE_CASE = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''') test_mrpc(_UpperCAmelCase , _UpperCAmelCase) 
accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**') for split_batches in [True, False]: for dispatch_batches in [True, False]: SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase) if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''') test_torch_metrics(_UpperCAmelCase , 99) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**') SCREAMING_SNAKE_CASE = Accelerator() test_torch_metrics(_UpperCAmelCase , 512) accelerator.state._reset_state() def lowerCamelCase__ (_UpperCAmelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
73
1
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [ [ 8.2_22_09_91, # 3rd highest value; idx. 0 -0.5_62_00_44, 5.23_22_97_52, 4.0_38_63_93, -6.8_79_83_78, -0.54_78_58_02, -3.2_01_21_53, 2.92_77_71_76, 1.88_17_19_53, 7.35_34_12_76, # 5th highest value; idx. 9 8.43_20_78_33, # 2nd highest value; idx. 10 -9.85_71_18_36, -5.96_20_92_36, -1.13_03_91_61, -7.1_11_52_94, -0.8_36_96_33, -5.3_18_64_08, 7.06_42_74_07, 0.81_36_93_44, -0.82_02_38_17, -5.9_17_97_96, 0.58_81_34_43, -6.99_77_84_38, 4.71_55_11_89, -0.18_77_16_37, 7.44_02_07_59, # 4th highest value; idx. 25 9.38_45_09_87, # 1st highest value; idx. 26 2.12_66_29_41, -9.32_56_20_38, 2.35_65_25_22, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_42_55_18, 4.53_13_92_38, -5.57_51_04_64, -6.28_03_06_99, -7.19_52_95_03, -4.02_12_25_51, 1.39_33_70_37, -6.06_70_70_57, 1.59_48_05_17, -9.64_31_19, 0.03_90_77_99, 0.67_23_17_62, -8.88_20_67_26, 6.27_11_59_22, # 4th highest value; idx. 13 2.28_52_07_23, 4.82_76_75_06, 4.30_42_13_68, 8.8_27_53_13, # 2nd highest value; idx. 17 5.44_02_99_58, # 5th highest value; idx. 
18 -4.4_73_57_94, 7.38_57_95_36, # 3rd highest value; idx. 20 -2.91_05_16_63, 2.61_94_60_77, -2.5_67_47_62, -9.48_95_93_02, -4.02_92_26_45, -1.35_41_69_18, 9.67_70_23_23, # 1st highest value; idx. 27 -5.89_47_85_53, 1.85_37_04_67, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above SCREAMING_SNAKE_CASE = tf_top_k_top_p_filtering(a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4) SCREAMING_SNAKE_CASE = output[output != -float('inf')] SCREAMING_SNAKE_CASE = tf.cast( tf.where(tf.not_equal(a , tf.constant(-float('inf') , dtype=tf.floataa))) , dtype=tf.intaa , ) tf.debugging.assert_near(a , a , rtol=1E-12) tf.debugging.assert_equal(a , a) @require_tf class _snake_case ( unittest.TestCase , A__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): _lowercase : Tuple = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: # TF-only test: tf.saved_model export SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = 2 
class _snake_case ( tf.Module ): def __init__( self , a) -> Optional[Any]: super(a , self).__init__() SCREAMING_SNAKE_CASE = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids'), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask'), ) , jit_compile=a , ) def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Union[str, Any]: SCREAMING_SNAKE_CASE = self.model.generate( input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , ) return {"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE = [[2, 0], [102, 103]] SCREAMING_SNAKE_CASE = [[1, 0], [1, 1]] SCREAMING_SNAKE_CASE = DummyModel(model=a) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(a , a , signatures={'serving_default': dummy_model.serving}) SCREAMING_SNAKE_CASE = tf.saved_model.load(a).signatures['serving_default'] for batch_size in range(1 , len(a) + 1): SCREAMING_SNAKE_CASE = { 'input_ids': tf.constant(dummy_input_ids[:batch_size]), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size]), } SCREAMING_SNAKE_CASE = serving_func(**a)['sequences'] SCREAMING_SNAKE_CASE = test_model.generate(**a , max_new_tokens=a) tf.debugging.assert_equal(a , a) @slow def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: # TF-only test: tf.saved_model export SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = 2 class _snake_case ( tf.Module ): def __init__( self , a) -> Optional[int]: super(a , self).__init__() SCREAMING_SNAKE_CASE = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids'), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask'), ) , jit_compile=a , ) def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[str]: SCREAMING_SNAKE_CASE = self.model.generate( input_ids=a , attention_mask=a , max_new_tokens=a , return_dict_in_generate=a , ) return 
{"sequences": outputs["sequences"]} SCREAMING_SNAKE_CASE = [[2], [102, 103]] SCREAMING_SNAKE_CASE = [[1], [1, 1]] SCREAMING_SNAKE_CASE = DummyModel(model=a) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(a , a , signatures={'serving_default': dummy_model.serving}) SCREAMING_SNAKE_CASE = tf.saved_model.load(a).signatures['serving_default'] for input_row in range(len(a)): SCREAMING_SNAKE_CASE = { 'input_ids': tf.constant([dummy_input_ids[input_row]]), 'attention_mask': tf.constant([dummy_attention_masks[input_row]]), } SCREAMING_SNAKE_CASE = serving_func(**a)['sequences'] SCREAMING_SNAKE_CASE = test_model.generate(**a , max_new_tokens=a) tf.debugging.assert_equal(a , a) @slow @require_tensorflow_text def SCREAMING_SNAKE_CASE__ ( self) -> str: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=a) class _snake_case ( tf.keras.layers.Layer ): def __init__( self) -> Optional[int]: super().__init__() SCREAMING_SNAKE_CASE = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(a , 'spiece.model') , 'rb').read()) SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5') def SCREAMING_SNAKE_CASE__ ( self , a , *a , **a) -> Tuple: SCREAMING_SNAKE_CASE = self.tokenizer.tokenize(a) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = text.pad_model_inputs( a , max_seq_length=64 , pad_value=self.model.config.pad_token_id) SCREAMING_SNAKE_CASE = self.model.generate(input_ids=a , attention_mask=a) return self.tokenizer.detokenize(a) SCREAMING_SNAKE_CASE = CompleteSentenceTransformer() SCREAMING_SNAKE_CASE = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs') SCREAMING_SNAKE_CASE = complete_model(a) SCREAMING_SNAKE_CASE = tf.keras.Model(a , a) keras_model.save(a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: # Has PT equivalent: this test 
relies on random sampling SCREAMING_SNAKE_CASE = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7, } SCREAMING_SNAKE_CASE = 14 SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE = 'Hello, my dog is cute and' SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='tf') SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') SCREAMING_SNAKE_CASE = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0'): tf.random.set_seed(0) SCREAMING_SNAKE_CASE = model.generate(**a , eos_token_id=a , **a) self.assertTrue(expectation == len(generated_tokens[0])) SCREAMING_SNAKE_CASE = [638, 198] with tf.device(':/CPU:0'): tf.random.set_seed(0) SCREAMING_SNAKE_CASE = model.generate(**a , eos_token_id=a , **a) self.assertTrue(expectation == len(generated_tokens[0])) def SCREAMING_SNAKE_CASE__ ( self) -> int: # Has PT equivalent: ample use of framework-specific code SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE = 'Hugging Face is a technology company based in New York and Paris.' 
SCREAMING_SNAKE_CASE = bart_tokenizer(a , return_tensors='tf').input_ids SCREAMING_SNAKE_CASE = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE = bart_model.generate(a).numpy() class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self , a , a=None , **a) -> Optional[Any]: return super().call(a , **a) SCREAMING_SNAKE_CASE = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart') SCREAMING_SNAKE_CASE = bart_model.generate(a , foo='bar').numpy() self.assertTrue(np.array_equal(a , a)) class _snake_case ( bart_model.model.encoder.__class__ ): def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> List[str]: return super().call(a , **a) SCREAMING_SNAKE_CASE = FakeEncoder(bart_model.config , bart_model.model.shared) SCREAMING_SNAKE_CASE = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) SCREAMING_SNAKE_CASE = bart_model.generate(a).numpy() with self.assertRaises(a): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(a , foo='bar')
73
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available a_ : List[str] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
1
# Flax/JAX port of the Karras et al. (2022) "VE" scheduler.
#
# NOTE(review): this module has been through an identifier-mangling transform:
# all three classes are named `_snake_case` (later definitions shadow earlier
# ones), every field is `_lowercase`, most methods are `SCREAMING_SNAKE_CASE__`,
# parameters are all named `a` (duplicate parameter names are a SyntaxError),
# and bodies read names (`num_inference_steps`, `state`, `sigma`, `sample`,
# `gamma`, `eps`, `sigma_hat`, `model_output`, ...) whose bindings were lost.
# References to `KarrasVeSchedulerState` / `FlaxKarrasVeOutput` / `A__` /
# `Optional[Any]` / `jnp.floataa` are also unresolved.  The original names need
# to be restored from upstream before this file can run; code is left untouched
# here and only annotated.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class _snake_case :
    # Mutable scheduler state (presumably: num_inference_steps, timesteps,
    # schedule) — all three fields share the name `_lowercase`, so only the
    # last binding survives; confirm and restore the field names.
    # setable values
    _lowercase : Optional[int] = None
    _lowercase : Optional[jnp.ndarray] = None
    _lowercase : Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls) -> int:
        # Factory for a fresh, empty state instance.
        return cls()


@dataclass
class _snake_case ( A__ ):
    # Output container for a scheduler step: previous sample, derivative and
    # the updated state.  NOTE(review): base `A__` (presumably `BaseOutput`)
    # and the `KarrasVeSchedulerState` annotation are unresolved names.
    _lowercase : jnp.ndarray
    _lowercase : jnp.ndarray
    _lowercase : KarrasVeSchedulerState


class _snake_case ( A__ , A__ ):
    # Stochastic sampler scheduler; config: sigma_min, sigma_max, s_noise,
    # s_churn, s_min, s_max — TODO confirm against upstream diffusers.

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # Signals that this scheduler carries an explicit state object.
        return True

    @register_to_config
    def __init__( self , a = 0.02 , a = 100 , a = 1.0_07 , a = 80 , a = 0.05 , a = 50 , ) -> Optional[int]:
        # NOTE(review): all six parameters are named `a` — duplicate argument
        # names are a SyntaxError; the original keyword names were lost.
        pass

    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        # Create the initial scheduler state.
        return KarrasVeSchedulerState.create()

    def SCREAMING_SNAKE_CASE__ ( self , a , a , a = ()) -> KarrasVeSchedulerState:
        # Build the descending timestep array and the geometric sigma schedule
        # between sigma_max and sigma_min, then store both on the state.
        SCREAMING_SNAKE_CASE = jnp.arange(0 , a)[::-1].copy()
        SCREAMING_SNAKE_CASE = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        # NOTE(review): `jnp.floataa` is presumably `jnp.float32` — confirm.
        return state.replace(
            num_inference_steps=a , schedule=jnp.array(a , dtype=jnp.floataa) , timesteps=a , )

    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , ) -> Tuple[jnp.ndarray, float]:
        # Stochastic churn: inflate sigma to sigma_hat and add matching noise
        # so the sample stays on the perturbed noise level.
        if self.config.s_min <= sigma <= self.config.s_max:
            SCREAMING_SNAKE_CASE = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1)
        else:
            SCREAMING_SNAKE_CASE = 0
        # sample eps ~ N(0, S_noise^2 * I)
        SCREAMING_SNAKE_CASE = random.split(a , num=1)
        SCREAMING_SNAKE_CASE = self.config.s_noise * random.normal(key=a , shape=sample.shape)
        SCREAMING_SNAKE_CASE = sigma + gamma * sigma
        SCREAMING_SNAKE_CASE = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
        # Euler step from sigma_hat to sigma_prev (Algorithm 2, first-order).
        SCREAMING_SNAKE_CASE = sample_hat + sigma_hat * model_output
        SCREAMING_SNAKE_CASE = (sample_hat - pred_original_sample) / sigma_hat
        SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=a , derivative=a , state=a)

    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
        # Second-order (Heun) correction: average the original derivative with
        # the derivative re-evaluated at the predicted sample.
        SCREAMING_SNAKE_CASE = sample_prev + sigma_prev * model_output
        SCREAMING_SNAKE_CASE = (sample_prev - pred_original_sample) / sigma_prev
        SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=a , derivative=a , state=a)

    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> int:
        # add_noise is not supported for this scheduler.
        raise NotImplementedError()
73
# Integration tests checking that dataset infos and data mirrored on the
# Hugging Face GCP bucket are reachable and loadable.
#
# NOTE(review): identifier mangling broke several bindings in this file: the
# dataset list is bound to `a_` but read as `DATASETS_ON_HF_GCP`; the helper is
# bound to `lowerCamelCase__` but called as `list_datasets_on_hf_gcp_parameters`;
# test-method parameters are all named `a` (duplicate argument names are a
# SyntaxError); `A__` is unresolved.  Code left untouched, only annotated.
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


# (dataset, config) pairs expected to be mirrored on the HF GCP bucket.
a_ : str = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]


def lowerCamelCase__ (_UpperCAmelCase=True):
    # Expand the dataset list into absl `named_parameters` dicts.
    # NOTE(review): body reads `with_config` but the parameter was renamed to
    # `_UpperCAmelCase` — NameError when called; presumably `with_config`.
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__))
class _snake_case ( A__ ):
    # Parameterized `dataset` / `config_name` class attributes; both fields
    # share the name `_lowercase`, so only the last binding survives.
    _lowercase : Optional[Any] = None
    _lowercase : Optional[Any] = None

    def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]:
        # Verify the dataset-info JSON for (dataset, config) exists on GCP by
        # building its bucket URL and downloading it into a temp cache.
        with TemporaryDirectory() as tmp_dir:
            SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a)
            SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=a)
            SCREAMING_SNAKE_CASE = builder_cls(
                cache_dir=a , config_name=a , hash=dataset_module.hash , )
            SCREAMING_SNAKE_CASE = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'),
                    config.DATASET_INFO_FILENAME,
                ])
            SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a)
            self.assertTrue(os.path.exists(a))


@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
    # Download-and-prepare from the GCP mirror instead of running the
    # apache-beam pipeline locally.
    # NOTE(review): body reads `tmp_path_factory`; parameter renamed away.
    SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path)
    SCREAMING_SNAKE_CASE = builder_cls(
        cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    SCREAMING_SNAKE_CASE = None
    builder_instance.download_and_prepare()
    SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
    # Stream the same config and sanity-check the returned dataset dict.
    SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = builder_cls(
        cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
    SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
    assert "train" in ds
    assert isinstance(ds['train'] , _UpperCAmelCase)
    assert next(iter(ds['train']))
73
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class _snake_case(PipelineTool):
    """Tool that produces a binary segmentation mask of an image for a text label.

    Fixes applied: the base class was the unresolved name ``A__`` (restored to
    the imported ``PipelineTool``); the tool attributes were all bound to the
    same name ``_lowercase`` and the three methods all to
    ``SCREAMING_SNAKE_CASE__`` (each shadowing the previous one), so they are
    restored to the attribute/method names the ``PipelineTool`` contract reads
    (``description``/``name``/``inputs``/... and ``encode``/``forward``/
    ``decode``); ``np.uinta`` was a NameError for ``np.uint8``.
    """

    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']

    def __init__(self, *args, **kwargs):
        # `*a, **a` in the original duplicated the argument name (SyntaxError).
        # Require the vision backend before normal tool setup.
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        """Tokenize the (image, label) pair into model inputs."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        """Run the CLIPSeg model without tracking gradients; returns logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Turn per-pixel logits into a black-and-white PIL mask image."""
        array = outputs.cpu().detach().numpy()
        # Binarize the logits into a {0, 1} mask (positive logit => masked).
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
73
from __future__ import annotations


def lowerCamelCase__(_UpperCAmelCase: int) -> list[int]:
    """Return the prime factorization of ``_UpperCAmelCase`` in ascending order.

    Repeated factors appear once per multiplicity, e.g. 12 -> [2, 2, 3].
    For inputs < 2 the result is an empty list.

    The original body never bound ``i``, ``n`` or ``factors`` (every
    assignment target had been renamed to the same placeholder) and appended
    the untouched argument instead of the found factor — both fixed here.
    """
    n = _UpperCAmelCase
    i = 2
    factors: list[int] = []
    # Trial division: any composite n has a prime factor <= sqrt(n).
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        # Whatever remains after dividing out all small factors is prime.
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',) SCREAMING_SNAKE_CASE = torch.permute(_UpperCAmelCase , (0, 2, 1)) elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase): # linear layer SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',) SCREAMING_SNAKE_CASE = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',) return flax_key_tuple, flax_tensor def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if "metadata" in layer: SCREAMING_SNAKE_CASE = layer.split('metadata') SCREAMING_SNAKE_CASE = ''.join(split_layer[0])[:-1] SCREAMING_SNAKE_CASE = [tuple(('metadata' + split_layer[1]).split('/'))] elif "kvstore" in layer: SCREAMING_SNAKE_CASE = layer.split('kvstore') SCREAMING_SNAKE_CASE = ''.join(split_layer[0])[:-1] SCREAMING_SNAKE_CASE = [tuple(('kvstore' + split_layer[1]).split('/'))] else: SCREAMING_SNAKE_CASE = layer.split('/') SCREAMING_SNAKE_CASE = '/'.join(split_layer[:-1]) SCREAMING_SNAKE_CASE = (split_layer[-1],) if "kvstore/path" in layer: SCREAMING_SNAKE_CASE = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: SCREAMING_SNAKE_CASE = 'file' else: SCREAMING_SNAKE_CASE = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def lowerCamelCase__ 
(_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = rename_keys(_UpperCAmelCase) SCREAMING_SNAKE_CASE = {} for k, v in current_block.items(): SCREAMING_SNAKE_CASE = v SCREAMING_SNAKE_CASE = new_current_block torch.save(_UpperCAmelCase , _UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = WEIGHTS_NAME): SCREAMING_SNAKE_CASE = convert_file_size_to_int(_UpperCAmelCase) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase) with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb') as fp: SCREAMING_SNAKE_CASE = serialization.msgpack_restore(fp.read())['optimizer']['target'] SCREAMING_SNAKE_CASE = flatten_dict(_UpperCAmelCase , sep='/') SCREAMING_SNAKE_CASE = {} for layer in checkpoint_info.keys(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_key_and_tensorstore_dict( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if curr_real_layer_name in all_layers: SCREAMING_SNAKE_CASE = content else: SCREAMING_SNAKE_CASE = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file SCREAMING_SNAKE_CASE = ts.open(unflatten_dict(all_layers[key])).result().read().result() SCREAMING_SNAKE_CASE = torch.tensor(_UpperCAmelCase) SCREAMING_SNAKE_CASE = raw_weights.numel() * dtype_byte_size(raw_weights.dtype) # use the renaming pattern from the small conversion scripts SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = rename_base_flax_keys(tuple(key.split('/')) , _UpperCAmelCase) SCREAMING_SNAKE_CASE = '/'.join(_UpperCAmelCase) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: SCREAMING_SNAKE_CASE = os.path.join( _UpperCAmelCase , weights_name.replace('.bin' , F'''-{len(_UpperCAmelCase)+1:05d}-of-???.bin''')) rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase) sharded_state_dicts.append(current_block.keys()) del current_block SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = raw_weights.to(getattr(_UpperCAmelCase , _UpperCAmelCase)) current_block_size += weight_size total_size += weight_size # Add the last block SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , weights_name.replace('.bin' , F'''-{len(_UpperCAmelCase)+1:05d}-of-???.bin''')) rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase) sharded_state_dicts.append(current_block.keys()) # If we only have one shard, we return it if len(_UpperCAmelCase) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} for idx, shard in enumerate(_UpperCAmelCase): SCREAMING_SNAKE_CASE = weights_name.replace( '.bin' , F'''-{idx+1:05d}-of-{len(_UpperCAmelCase):05d}.bin''') # len(sharded_state_dicts):05d} SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''')) os.rename(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase)) SCREAMING_SNAKE_CASE = shard for key in shard: SCREAMING_SNAKE_CASE = shard_file # Add the metadata SCREAMING_SNAKE_CASE = {'total_size': total_size} SCREAMING_SNAKE_CASE = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase) , 'w' , encoding='utf-8') as f: SCREAMING_SNAKE_CASE = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase) + '\n' f.write(_UpperCAmelCase) return metadata, index if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', 
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) a_ : Any = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def lowerCamelCase__ (): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer SCREAMING_SNAKE_CASE = SwitchTransformersConfig.from_pretrained('google/switch-base-8') config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted') SCREAMING_SNAKE_CASE = SwitchTransformersForConditionalGeneration.from_pretrained( '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto') SCREAMING_SNAKE_CASE = TaTokenizer.from_pretrained('t5-small') SCREAMING_SNAKE_CASE = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.' SCREAMING_SNAKE_CASE = tokenizer(_UpperCAmelCase , return_tensors='pt').input_ids SCREAMING_SNAKE_CASE = model.generate(_UpperCAmelCase , decoder_start_token_id=0) print(tokenizer.decode(out[0]))
73
# LZW-style bit-string file compressor (read file as bits -> compress ->
# prepend original length -> write packed bytes).
#
# NOTE(review): identifier mangling left this file non-runnable: all six
# functions share the name `lowerCamelCase__` (each definition shadows the
# previous, so the internal calls to `read_file_binary`, `compress_data`,
# `add_key_to_lexicon`, `add_file_length`, `write_file_binary` are NameErrors);
# several functions declare the same parameter name more than once (a
# SyntaxError); locals such as `result`, `curr_byte`, `lexicon`, `index`,
# `curr_string`, `last_match_id`, `file_length`, `length_length`,
# `to_write`, `byte_length`, `result_byte_array` are read but never bound; and
# `math.loga` is presumably `math.log2`.  Code left untouched, only annotated.
import math
import os
import sys


def lowerCamelCase__ (_UpperCAmelCase):
    # Read the file at the given path and return its bytes as one long string
    # of '0'/'1' characters (8 bits per byte).
    SCREAMING_SNAKE_CASE = ''
    try:
        with open(_UpperCAmelCase , 'rb') as binary_file:
            SCREAMING_SNAKE_CASE = binary_file.read()
            for dat in data:
                SCREAMING_SNAKE_CASE = F'''{dat:08b}'''
                result += curr_byte
            return result
    except OSError:
        print('File not accessible')
        sys.exit()


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
    # Extend the lexicon after emitting a code: replace the matched key with
    # its '0'/'1' extensions and widen all codes when the index crosses a
    # power of two.  NOTE(review): four identical parameter names — SyntaxError.
    lexicon.pop(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = last_match_id
    if math.loga(_UpperCAmelCase).is_integer():
        for curr_key in lexicon:
            SCREAMING_SNAKE_CASE = '0' + lexicon[curr_key]
    SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]


def lowerCamelCase__ (_UpperCAmelCase):
    # Core LZW pass over the bit string: accumulate bits until the current
    # string is a lexicon key, emit its code, then grow the lexicon.
    SCREAMING_SNAKE_CASE = {'0': '0', '1': '1'}
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = '', ''
    SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
    for i in range(len(_UpperCAmelCase)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        SCREAMING_SNAKE_CASE = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
        index += 1
        SCREAMING_SNAKE_CASE = ''
    # Flush a trailing partial string by zero-padding until it matches a key.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        SCREAMING_SNAKE_CASE = lexicon[curr_string]
        result += last_match_id
    return result


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    # Prefix the compressed payload with the original file length in binary
    # (preceded by length-of-length zero padding) so it can be decompressed.
    SCREAMING_SNAKE_CASE = os.path.getsize(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
    SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
    return "0" * (length_length - 1) + file_length_binary + compressed


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    # Pack the bit string into 8-bit chunks, add a '1' terminator plus zero
    # padding in the final byte, and write the bytes to the output file.
    SCREAMING_SNAKE_CASE = 8
    try:
        with open(_UpperCAmelCase , 'wb') as opened_file:
            SCREAMING_SNAKE_CASE = [
                to_write[i : i + byte_length] for i in range(0 , len(_UpperCAmelCase) , _UpperCAmelCase)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(_UpperCAmelCase , 2).to_bytes(1 , byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    # Full pipeline: read bits -> compress -> prepend length -> write bytes.
    SCREAMING_SNAKE_CASE = read_file_binary(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = compress_data(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = add_file_length(_UpperCAmelCase , _UpperCAmelCase)
    write_file_binary(_UpperCAmelCase , _UpperCAmelCase)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
73
1
def lowerCamelCase__(_UpperCAmelCase: float, digit_amount: int) -> float:
    """Isolate the fractional part of ``_UpperCAmelCase``.

    When ``digit_amount`` is positive, the fractional part is rounded to that
    many decimal places; otherwise it is returned unrounded.

    The original signature declared the parameter name ``_UpperCAmelCase``
    twice (a SyntaxError) and the body read the lost names ``number`` /
    ``digit_amount`` — restored here.
    """
    number = _UpperCAmelCase
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


# Backwards-compatible alias: the demo below (and the function's original
# upstream name) is `decimal_isolate`.
decimal_isolate = lowerCamelCase__

if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
73
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCamelCase__ (_UpperCAmelCase): return 1.0 / (1.0 + np.exp(-_outputs)) def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=_UpperCAmelCase) SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_UpperCAmelCase) class _snake_case ( A__ ): _lowercase : Tuple = '''sigmoid''' _lowercase : List[str] = '''softmax''' _lowercase : Tuple = '''none''' @add_end_docstrings( A__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. 
''' , ) class _snake_case ( A__ ): _lowercase : Optional[Any] = False _lowercase : Tuple = ClassificationFunction.NONE def __init__( self , **a) -> Optional[Any]: super().__init__(**a) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" SCREAMING_SNAKE_CASE = tokenizer_kwargs SCREAMING_SNAKE_CASE = {} if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None: SCREAMING_SNAKE_CASE = self.model.config.return_all_scores if isinstance(a , a) or top_k is None: SCREAMING_SNAKE_CASE = top_k SCREAMING_SNAKE_CASE = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , ) if return_all_scores: SCREAMING_SNAKE_CASE = None else: SCREAMING_SNAKE_CASE = 1 if isinstance(a , a): SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: SCREAMING_SNAKE_CASE = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *a , **a) -> Optional[int]: SCREAMING_SNAKE_CASE = super().__call__(*a , **a) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
SCREAMING_SNAKE_CASE = 'top_k' not in kwargs if isinstance(args[0] , a) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> Dict[str, GenericTensor]: SCREAMING_SNAKE_CASE = self.framework if isinstance(a , a): return self.tokenizer(**a , return_tensors=a , **a) elif isinstance(a , a) and len(a) == 1 and isinstance(inputs[0] , a) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a) elif isinstance(a , a): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.') return self.tokenizer(a , return_tensors=a , **a) def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]: return self.model(**a) def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1 , a=True) -> Any: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. 
# Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None: SCREAMING_SNAKE_CASE = self.model.config.function_to_apply else: SCREAMING_SNAKE_CASE = ClassificationFunction.NONE SCREAMING_SNAKE_CASE = model_outputs['logits'][0] SCREAMING_SNAKE_CASE = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: SCREAMING_SNAKE_CASE = sigmoid(a) elif function_to_apply == ClassificationFunction.SOFTMAX: SCREAMING_SNAKE_CASE = softmax(a) elif function_to_apply == ClassificationFunction.NONE: SCREAMING_SNAKE_CASE = outputs else: raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''') if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} SCREAMING_SNAKE_CASE = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a) ] if not _legacy: dict_scores.sort(key=lambda a: x["score"] , reverse=a) if top_k is not None: SCREAMING_SNAKE_CASE = dict_scores[:top_k] return dict_scores
73
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): _lowercase : Any = ViTImageProcessor if is_vision_available() else None @property def SCREAMING_SNAKE_CASE__ ( self) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = (3, 32, 128) SCREAMING_SNAKE_CASE = tempfile.mkdtemp() # fmt: off SCREAMING_SNAKE_CASE = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on SCREAMING_SNAKE_CASE = dict(zip(a , range(len(a)))) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(a) + '\n') SCREAMING_SNAKE_CASE = { 'do_normalize': False, 'do_resize': True, 'image_processor_type': 'ViTImageProcessor', 'resample': 3, 'size': {'height': 32, 'width': 128}, } SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , a) with open(self.image_processor_file , 'w' , encoding='utf-8') as fp: json.dump(a , a) def SCREAMING_SNAKE_CASE__ ( self , **a) -> List[Any]: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **a) def SCREAMING_SNAKE_CASE__ ( self , **a) -> Dict: return ViTImageProcessor.from_pretrained(self.tmpdirname , **a) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: 
shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) SCREAMING_SNAKE_CASE = Image.fromarray(np.moveaxis(a , 0 , -1)) return image_input def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=a , image_processor=a) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=a) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , a) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , a) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=a , image_processor=a) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)') SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=a , padding_value=1.0) SCREAMING_SNAKE_CASE = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=a , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , a) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , a) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=a , image_processor=a) SCREAMING_SNAKE_CASE = 
self.prepare_image_inputs() SCREAMING_SNAKE_CASE = image_processor(a , return_tensors='np') SCREAMING_SNAKE_CASE = processor(images=a , return_tensors='np') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=a , image_processor=a) SCREAMING_SNAKE_CASE = 'test' SCREAMING_SNAKE_CASE = processor(text=a) SCREAMING_SNAKE_CASE = tokenizer(a) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=a , image_processor=a) SCREAMING_SNAKE_CASE = 'test' SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=a , images=a) self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels']) # test if it raises when no input is passed with pytest.raises(a): processor() def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=a , image_processor=a) SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE = processor.char_decode(a) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a) SCREAMING_SNAKE_CASE = [seq.replace(' ' , '') for seq in decoded_tok] self.assertListEqual(a , a) def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=a , image_processor=a) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = self.prepare_image_inputs() 
SCREAMING_SNAKE_CASE = processor(text=a , images=a) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = MgpstrProcessor(tokenizer=a , image_processor=a) SCREAMING_SNAKE_CASE = torch.randn(1 , 27 , 38) SCREAMING_SNAKE_CASE = torch.randn(1 , 27 , 5_0257) SCREAMING_SNAKE_CASE = torch.randn(1 , 27 , 3_0522) SCREAMING_SNAKE_CASE = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
73
import heapq as hq
import math
from collections.abc import Iterator


class _snake_case:
    """A graph vertex for Prim's minimum-spanning-tree algorithms.

    NOTE(review): the class name is kept as-is for import compatibility;
    it plays the role of a ``Vertex``.
    """

    def __init__(self, id_) -> None:
        # Store the id as a string so int- and str-created vertices behave alike.
        self.id = str(id_)
        self.key = None  # current cheapest edge weight connecting this vertex to the MST
        self.pi = None  # predecessor vertex in the MST
        self.neighbors = []
        self.edges = {}  # {neighbor vertex id: edge weight}

    def __lt__(self, other) -> bool:
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex) -> None:
        """Record *vertex* as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        """Set the weight of the edge from this vertex to *vertex*."""
        self.edges[vertex.id] = weight


# Readable public alias for the mangled class name.
Vertex = _snake_case


def connect(graph, a, b, edge):
    """Connect vertices ``a`` and ``b`` (1-based indices into *graph*) with weight *edge*."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prims(graph, root):
    """Prim's MST with a linear-scan minimum — O(V^2).

    Returns a list of ``(vertex, predecessor)`` pairs as 1-based indices,
    one per non-root vertex.
    """
    mst_edges = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        # Extract the frontier vertex with the smallest key.
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]

    for i in range(1, len(graph)):
        mst_edges.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return mst_edges


def prims_heap(graph, root) -> Iterator[tuple]:
    """Prim's MST using a binary heap; yields ``(vertex, predecessor)`` 1-based pairs."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # Keys changed behind the heap's back, so restore the invariant.
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def lowerCamelCase__():
    # Placeholder kept for interface compatibility: in the original module the
    # repeated ``lowerCamelCase__`` definitions shadowed each other and only
    # this empty final binding was reachable.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import argparse import json import subprocess def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = ( F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"''' ' https://api.github.com/repos/huggingface/transformers/actions/runners' ) SCREAMING_SNAKE_CASE = subprocess.run(_UpperCAmelCase , shell=_UpperCAmelCase , stdout=subprocess.PIPE) SCREAMING_SNAKE_CASE = output.stdout.decode('utf-8') SCREAMING_SNAKE_CASE = json.loads(_UpperCAmelCase) SCREAMING_SNAKE_CASE = status['runners'] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(_UpperCAmelCase) # save the result so we can report them on Slack with open('offline_runners.txt' , 'w') as fp: fp.write(json.dumps(_UpperCAmelCase)) if len(_UpperCAmelCase) > 0: SCREAMING_SNAKE_CASE = '\n'.join([x['name'] for x in offline_runners]) raise ValueError(F'''The following runners are offline:\n{failed}''') if __name__ == "__main__": def lowerCamelCase__ (_UpperCAmelCase): return values.split(',') a_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--target_runners', default=None, type=list_str, required=True, help='Comma-separated list of runners to check status.', ) parser.add_argument( '--token', default=None, type=str, required=True, help='A token that has actions:read permission.' ) a_ : Optional[int] = parser.parse_args() get_runner_status(args.target_runners, args.token)
73
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ : Optional[Any] = { 'configuration_mask2former': [ 'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Mask2FormerConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Union[str, Any] = ['Mask2FormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ 'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'Mask2FormerForUniversalSegmentation', 'Mask2FormerModel', 'Mask2FormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
73
1
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


# Number of labels per GLUE fine-tuning task.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model and config.

    The model head is chosen from *finetuning_task*: a GLUE task name yields a
    sequence-classification head, a name containing "squad" a QA head, and
    anything else the plain LM head.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


# Backward-compatible alias for the original (mangled) public name.
lowerCamelCase__ = convert_xlnet_checkpoint_to_pytorch


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
73
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Dict = logging.get_logger(__name__) a_ : Union[str, Any] = { 'edbeeching/decision-transformer-gym-hopper-medium': ( 'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class _snake_case ( A__ ): _lowercase : Optional[Any] = '''decision_transformer''' _lowercase : str = ['''past_key_values'''] _lowercase : Union[str, Any] = { '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , a=17 , a=4 , a=128 , a=4096 , a=True , a=1 , a=1024 , a=3 , a=1 , a=None , a="relu" , a=0.1 , a=0.1 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=True , a=5_0256 , a=5_0256 , a=False , a=False , **a , ) -> List[str]: SCREAMING_SNAKE_CASE = state_dim SCREAMING_SNAKE_CASE = act_dim SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = max_ep_len SCREAMING_SNAKE_CASE = action_tanh SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = n_positions SCREAMING_SNAKE_CASE = n_layer SCREAMING_SNAKE_CASE = n_head SCREAMING_SNAKE_CASE = n_inner SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_pdrop SCREAMING_SNAKE_CASE = embd_pdrop SCREAMING_SNAKE_CASE = attn_pdrop SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scale_attn_weights SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE = reorder_and_upcast_attn SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id super().__init__(bos_token_id=a , eos_token_id=a , **a)
73
1
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    """Build a ``VideoMAEConfig`` matching *model_name*'s architecture and labels."""
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # Pre-training checkpoints do not use mean pooling of the final features.
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on *config* for the small/large/huge variants."""
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


def rename_key(name):
    """Map an original checkpoint parameter name to the HF VideoMAE name."""
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name


def convert_state_dict(orig_state_dict, config):
    """Rename keys and split fused qkv weights into separate q/k/v tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    # Fused qkv weight is stacked as [q; k; v] along dim 0.
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    """Load the 16-frame spaghetti-eating clip used as a smoke-test input."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Download, convert, verify and optionally save/push a VideoMAE checkpoint."""
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.92_91, -0.40_61, -0.93_07])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.26_71, -0.46_89, -0.82_35])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.51_42]) if config.norm_pix_loss else torch.tensor([0.64_69])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.07_71, 0.00_11, -0.36_25])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.24_33, 0.16_32, -0.48_94])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.65_88, 0.09_90, -0.24_93])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.36_69, -0.06_88, -0.24_21])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.05_37, -0.15_39, -0.32_66])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.19_61, -0.83_37, -0.63_89])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
73
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build GLUE/MRPC train and eval dataloaders tokenized for bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train bert-base-cased on MRPC with Accelerate and report per-epoch metrics."""
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
73
1
def print_pascal_triangle(num_rows):
    """Pretty-print Pascal's triangle with *num_rows* rows, centred with spaces."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows):
    """Return Pascal's triangle as a list of rows, computing each cell directly.

    Raises ``TypeError`` for non-int input and ``ValueError`` for negatives;
    returns ``[]`` for 0 rows.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle, current_row_idx):
    """Build row *current_row_idx* from the previous row of *triangle*."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx):
    """Fill one interior cell as the sum of the two cells above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows):
    """Return Pascal's triangle, computing only half of each (symmetric) row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark():
    """Time both generators for row counts 0..14 and print the results."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func, value) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


# Backward-compatible alias: in the original module only the final binding of
# the repeated ``lowerCamelCase__`` name (the benchmark) was reachable.
lowerCamelCase__ = benchmark


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
73
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a_ : int = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Tuple = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
1
import math from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : Optional[Any] = { 'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class _snake_case ( A__ ): _lowercase : Optional[Any] = '''data2vec-audio''' def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=16 , a=19 , a=5 , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a="sum" , a=False , a=False , a=256 , a=(512, 512, 512, 512, 1500) , a=(5, 3, 3, 1, 1) , a=(1, 2, 3, 1, 1) , a=512 , a=0 , a=1 , a=2 , a=False , a=3 , a=2 , a=3 , a=None , **a , ) -> List[str]: super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = feat_extract_activation SCREAMING_SNAKE_CASE = list(a) SCREAMING_SNAKE_CASE = list(a) SCREAMING_SNAKE_CASE = list(a) SCREAMING_SNAKE_CASE = conv_bias SCREAMING_SNAKE_CASE = num_conv_pos_embeddings SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE = conv_pos_kernel_size SCREAMING_SNAKE_CASE = len(self.conv_dim) SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = feat_proj_dropout SCREAMING_SNAKE_CASE = final_dropout SCREAMING_SNAKE_CASE = layerdrop SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = use_weighted_layer_sum if ( (len(self.conv_stride) != 
self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE = mask_time_prob SCREAMING_SNAKE_CASE = mask_time_length SCREAMING_SNAKE_CASE = mask_time_min_masks SCREAMING_SNAKE_CASE = mask_feature_prob SCREAMING_SNAKE_CASE = mask_feature_length SCREAMING_SNAKE_CASE = mask_feature_min_masks # ctc loss SCREAMING_SNAKE_CASE = ctc_loss_reduction SCREAMING_SNAKE_CASE = ctc_zero_infinity # adapter SCREAMING_SNAKE_CASE = add_adapter SCREAMING_SNAKE_CASE = adapter_kernel_size SCREAMING_SNAKE_CASE = adapter_stride SCREAMING_SNAKE_CASE = num_adapter_layers SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE = list(a) SCREAMING_SNAKE_CASE = list(a) SCREAMING_SNAKE_CASE = list(a) SCREAMING_SNAKE_CASE = xvector_output_dim @property def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: return math.prod(self.conv_stride)
73
from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False): if radian_mode: return [magnitude * cos(_UpperCAmelCase), magnitude * sin(_UpperCAmelCase)] return [magnitude * cos(radians(_UpperCAmelCase)), magnitude * sin(radians(_UpperCAmelCase))] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10**-1): SCREAMING_SNAKE_CASE = cross(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(_UpperCAmelCase) return abs(_UpperCAmelCase) < eps if __name__ == "__main__": # Test to check if it works a_ : int = array( [ polar_force(718.4, 1_80 - 30), polar_force(879.54, 45), polar_force(1_00, -90), ] ) a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg a_ : Dict = array( [ polar_force(30 * 9.81, 15), polar_force(2_15, 1_80 - 45), polar_force(2_64, 90 - 30), ] ) a_ : Any = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg a_ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]]) a_ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
73
1
from __future__ import annotations def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = array[indexa], array[indexa] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if length > 1: SCREAMING_SNAKE_CASE = int(length / 2) for i in range(_UpperCAmelCase , low + middle): comp_and_swap(_UpperCAmelCase , _UpperCAmelCase , i + middle , _UpperCAmelCase) bitonic_merge(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) bitonic_merge(_UpperCAmelCase , low + middle , _UpperCAmelCase , _UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if length > 1: SCREAMING_SNAKE_CASE = int(length / 2) bitonic_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1) bitonic_sort(_UpperCAmelCase , low + middle , _UpperCAmelCase , 0) bitonic_merge(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if __name__ == "__main__": a_ : List[Any] = input('Enter numbers separated by a comma:\n').strip() a_ : str = [int(item.strip()) for item in user_input.split(',')] bitonic_sort(unsorted, 0, len(unsorted), 1) print('\nSorted array in ascending order is: ', end='') print(*unsorted, sep=', ') bitonic_merge(unsorted, 0, len(unsorted), 0) print('Sorted array in descending order is: ', end='') print(*unsorted, sep=', ')
73
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Optional[int] = logging.get_logger(__name__) a_ : int = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class _snake_case ( A__ ): _lowercase : Dict = '''cvt''' def __init__( self , a=3 , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[64, 192, 384] , a=[1, 3, 6] , a=[1, 2, 10] , a=[4.0, 4.0, 4.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.1] , a=[True, True, True] , a=[False, False, True] , a=["dw_bn", "dw_bn", "dw_bn"] , a=[3, 3, 3] , a=[1, 1, 1] , a=[2, 2, 2] , a=[1, 1, 1] , a=[1, 1, 1] , a=0.02 , a=1E-12 , **a , ) -> List[Any]: super().__init__(**a) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_sizes SCREAMING_SNAKE_CASE = patch_stride SCREAMING_SNAKE_CASE = patch_padding SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = depth SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = attention_drop_rate SCREAMING_SNAKE_CASE = drop_rate SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = qkv_bias SCREAMING_SNAKE_CASE = cls_token SCREAMING_SNAKE_CASE = qkv_projection_method SCREAMING_SNAKE_CASE = kernel_qkv SCREAMING_SNAKE_CASE = padding_kv SCREAMING_SNAKE_CASE = stride_kv SCREAMING_SNAKE_CASE = padding_q SCREAMING_SNAKE_CASE = stride_q SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps
73
1
def lowerCamelCase__ (_UpperCAmelCase): if not isinstance(_UpperCAmelCase , _UpperCAmelCase): raise ValueError('multiplicative_persistence() only accepts integral values') if num < 0: raise ValueError('multiplicative_persistence() does not accept negative values') SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = str(_UpperCAmelCase) while len(_UpperCAmelCase) != 1: SCREAMING_SNAKE_CASE = [int(_UpperCAmelCase) for i in num_string] SCREAMING_SNAKE_CASE = 1 for i in range(0 , len(_UpperCAmelCase)): total *= numbers[i] SCREAMING_SNAKE_CASE = str(_UpperCAmelCase) steps += 1 return steps def lowerCamelCase__ (_UpperCAmelCase): if not isinstance(_UpperCAmelCase , _UpperCAmelCase): raise ValueError('additive_persistence() only accepts integral values') if num < 0: raise ValueError('additive_persistence() does not accept negative values') SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = str(_UpperCAmelCase) while len(_UpperCAmelCase) != 1: SCREAMING_SNAKE_CASE = [int(_UpperCAmelCase) for i in num_string] SCREAMING_SNAKE_CASE = 0 for i in range(0 , len(_UpperCAmelCase)): total += numbers[i] SCREAMING_SNAKE_CASE = str(_UpperCAmelCase) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
73
def lowerCamelCase__ (_UpperCAmelCase = 10 , _UpperCAmelCase = 1000 , _UpperCAmelCase = True): assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError('Invalid value for min_val or max_val (min_value < max_value)') return min_val if option else max_val def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): return int((number_a + number_a) / 2) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) ), 'argument values must be type of "int"' if lower > higher: raise ValueError('argument value for lower and higher must be(lower > higher)') if not lower < to_guess < higher: raise ValueError( 'guess value must be within the range of lower and higher value') def answer(_UpperCAmelCase) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print('started...') SCREAMING_SNAKE_CASE = lower SCREAMING_SNAKE_CASE = higher SCREAMING_SNAKE_CASE = [] while True: SCREAMING_SNAKE_CASE = get_avg(_UpperCAmelCase , _UpperCAmelCase) last_numbers.append(_UpperCAmelCase) if answer(_UpperCAmelCase) == "low": SCREAMING_SNAKE_CASE = number elif answer(_UpperCAmelCase) == "high": SCREAMING_SNAKE_CASE = number else: break print(F'''guess the number : {last_numbers[-1]}''') print(F'''details : {last_numbers!s}''') def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = int(input('Enter lower value : ').strip()) SCREAMING_SNAKE_CASE = int(input('Enter high value : ').strip()) SCREAMING_SNAKE_CASE = int(input('Enter value to guess : ').strip()) guess_the_number(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if __name__ == "__main__": main()
73
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging a_ : List[Any] = logging.get_logger(__name__) a_ : Any = {'vocab_file': 'spiece.model'} a_ : Any = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } a_ : Tuple = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) a_ : Tuple = 0 a_ : Optional[Any] = 1 a_ : Union[str, Any] = 2 a_ : List[Any] = 3 a_ : Dict = 4 class _snake_case ( A__ ): _lowercase : Optional[Any] = VOCAB_FILES_NAMES _lowercase : str = PRETRAINED_VOCAB_FILES_MAP _lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Tuple = '''left''' def __init__( self , a , a=False , a=True , a=False , a="<s>" , a="</s>" , a="<unk>" , a="<sep>" , a="<pad>" , a="<cls>" , a="<mask>" , a=["<eop>", "<eod>"] , a = None , **a , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else mask_token SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , **a , ) SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = do_lower_case SCREAMING_SNAKE_CASE = remove_space SCREAMING_SNAKE_CASE = keep_accents SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(a) @property def SCREAMING_SNAKE_CASE__ ( self) -> str: return len(self.sp_model) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(a): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Any: SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self , a) -> Union[str, Any]: SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict: if self.remove_space: SCREAMING_SNAKE_CASE = ' '.join(inputs.strip().split()) else: SCREAMING_SNAKE_CASE = inputs SCREAMING_SNAKE_CASE = outputs.replace('``' , '"').replace('\'\'' , '"') if not self.keep_accents: SCREAMING_SNAKE_CASE = unicodedata.normalize('NFKD' , a) SCREAMING_SNAKE_CASE = ''.join([c for c in outputs if not unicodedata.combining(a)]) if self.do_lower_case: SCREAMING_SNAKE_CASE = outputs.lower() return outputs def SCREAMING_SNAKE_CASE__ ( self , a) -> List[str]: SCREAMING_SNAKE_CASE = self.preprocess_text(a) SCREAMING_SNAKE_CASE = self.sp_model.encode(a , 
out_type=a) SCREAMING_SNAKE_CASE = [] for piece in pieces: if len(a) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(a , '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: SCREAMING_SNAKE_CASE = cur_pieces[1:] else: SCREAMING_SNAKE_CASE = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(a) else: new_pieces.append(a) return new_pieces def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]: return self.sp_model.PieceToId(a) def SCREAMING_SNAKE_CASE__ ( self , a) -> int: return self.sp_model.IdToPiece(a) def SCREAMING_SNAKE_CASE__ ( self , a) -> Any: SCREAMING_SNAKE_CASE = ''.join(a).replace(a , ' ').strip() return out_string def SCREAMING_SNAKE_CASE__ ( self , a , a = False , a = None , a = True , **a , ) -> str: SCREAMING_SNAKE_CASE = kwargs.pop('use_source_tokenizer' , a) SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(a , skip_special_tokens=a) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(a)) SCREAMING_SNAKE_CASE = [] sub_texts.append(a) else: current_sub_text.append(a) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(a)) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens SCREAMING_SNAKE_CASE = ''.join(a) SCREAMING_SNAKE_CASE = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: SCREAMING_SNAKE_CASE = self.clean_up_tokenization(a) return clean_text else: return text def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]: SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a , token_ids_a=a , already_has_special_tokens=a) if token_ids_a is not None: return ([0] * len(a)) + [1] + ([0] * len(a)) + [1, 1] return ([0] * len(a)) + [1, 1] def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]: SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [2] if token_ids_a is None: return len(token_ids_a + sep) * [0] + cls_segment_id return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]: if not os.path.isdir(a): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return SCREAMING_SNAKE_CASE = os.path.join( a , (filename_prefix + '-' if filename_prefix else 
'') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(a) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , a) elif not os.path.isfile(self.vocab_file): with open(a , 'wb') as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(a) return (out_vocab_file,)
73
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class _snake_case : def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_choices SCREAMING_SNAKE_CASE = scope def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE 
= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , ) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any: SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a) SCREAMING_SNAKE_CASE = model(a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = OpenLlamaModel(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , ) SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , ) SCREAMING_SNAKE_CASE = model(a , attention_mask=a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int: SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a) model.to(a) model.eval() # first forward pass SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , ) SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size) SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1) SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0] SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0] # select random slice SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a , a , atol=1E-3)) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( 
SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) = config_and_inputs SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _snake_case ( A__ , A__ , A__ , unittest.TestCase ): _lowercase : List[Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else () _lowercase : List[str] = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _lowercase : List[str] = False _lowercase : Optional[int] = False def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37) def SCREAMING_SNAKE_CASE__ ( self) -> str: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE = 
OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 'single_label_classification' SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 'multi_label_classification' SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test') def SCREAMING_SNAKE_CASE__ ( self) -> Any: pass @parameterized.expand([('linear',), ('dynamic',)]) def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 
ids_tensor([1, 10] , config.vocab_size) SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE = OpenLlamaModel(a) original_model.to(a) original_model.eval() SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0} SCREAMING_SNAKE_CASE = OpenLlamaModel(a) scaled_model.to(a) scaled_model.eval() SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a , a , atol=1E-5)) else: self.assertFalse(torch.allclose(a , a , atol=1E-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(a , a , atol=1E-5))
73
1
import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device a_ : List[Any] = False class _snake_case ( unittest.TestCase ): pass @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: SCREAMING_SNAKE_CASE = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion') pipe.to(a) pipe.set_progress_bar_config(disable=a) SCREAMING_SNAKE_CASE = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg') SCREAMING_SNAKE_CASE = torch.manual_seed(0) SCREAMING_SNAKE_CASE = pipe( image=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images SCREAMING_SNAKE_CASE = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
73
from __future__ import annotations

# Collects every complete placement found by `solve` (note: each entry is a
# reference to the same mutable `board`, so only the count is meaningful —
# kept as-is to preserve the script's observable behavior).
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen placed at (row, column) attacks no existing queen.

    Checks the row, the column, and the two upward diagonals; rows below
    `row` are empty by construction of the backtracking order.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Backtrack over `row`, recording and printing every full solution."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # undo the placement before trying the next column
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board, 'Q' for a queen and '.' for an empty cell."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
73
1
from __future__ import annotations

# One entry per complete solution found by `solve`. Each entry aliases the
# single mutable `board` object (original behavior, preserved), so treat
# this list as a solution counter rather than a list of distinct boards.
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen at (row, column) conflicts with no placed queen.

    Only the row, the column and the two upward diagonals are scanned;
    rows below `row` cannot contain queens yet.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Diagonal toward the upper-left corner.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Diagonal toward the upper-right corner.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Recursively place queens row by row, printing/recording full solutions."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False


def printboard(board: list[list[int]]) -> None:
    """Render the board: 'Q' marks a queen, '.' an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
73
# NOTE(review): machine-mangled dump of the StableDiffusionDiffEdit pipeline
# test module. Every assignment target became `SCREAMING_SNAKE_CASE` and every
# method parameter became `a` — several defs therefore have duplicate
# parameter names (do not parse) and many references (`unet`, `pipe`, `mask`,
# `components`, `inputs`, ...) are undefined. Code kept token-identical; only
# comments/formatting were added. Restore names from the upstream diffusers
# test before running.
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image

from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class _snake_case ( A__ , A__ , unittest.TestCase ):
    # Pipeline under test plus the parameter sets exercised by the shared mixins.
    _lowercase : List[Any] = StableDiffusionDiffEditPipeline
    _lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    _lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    _lowercase : List[str] = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _lowercase : List[str] = frozenset([])

    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        # Build a tiny, seeded set of pipeline components (UNet, schedulers,
        # VAE, CLIP text encoder/tokenizer) so tests run quickly on CPU.
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
        SCREAMING_SNAKE_CASE = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
        SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        SCREAMING_SNAKE_CASE = CLIPTextModel(a)
        SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        SCREAMING_SNAKE_CASE = {
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
        # Dummy inputs for the main __call__ path (mask + precomputed latents).
        # NOTE(review): duplicate parameter name `a` — originally (device, seed).
        SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) , rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a)).to(a)
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = {
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
        # Dummy inputs for generate_mask (source/target prompt pair).
        SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = {
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]:
        # Dummy inputs for the invert() path (DDIM/DPM inversion tests).
        SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = {
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # Save/reload round-trip with all optional components set to None:
        # outputs before and after reload must match.
        if not hasattr(self.pipeline_class , '_optional_components'):
            return
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(a , a , a)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
        SCREAMING_SNAKE_CASE = pipe(**a)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a)
            SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a)
        pipe_loaded.to(a)
        pipe_loaded.set_progress_bar_config(disable=a)
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(a , a) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
        SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
        SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0]
        SCREAMING_SNAKE_CASE = np.abs(output - output_loaded).max()
        self.assertLess(a , 1E-4)

    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        # generate_mask on CPU against a recorded all-zero slice.
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.generate_mask(**a)
        SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16))
        SCREAMING_SNAKE_CASE = np.array([0] * 9)
        SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a , 1E-3)
        self.assertEqual(mask[0, -3, -4] , 0)

    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        # invert() with the default DDIM inverse scheduler on CPU.
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.invert(**a).images
        SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3))
        SCREAMING_SNAKE_CASE = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a , 1E-3)

    def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
        # Looser tolerance than the mixin default for batch-vs-single parity.
        super().test_inference_batch_single_identical(expected_max_diff=5E-3)

    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        # invert() again, swapping in the DPM multistep scheduler pair.
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
        SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a)
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.invert(**a).images
        SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3))
        SCREAMING_SNAKE_CASE = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a , 1E-3)


@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ ( self) -> Any:
        # tearDown: free GPU memory between slow tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls) -> List[Any]:
        # setUpClass: fetch and cache the shared source image once.
        SCREAMING_SNAKE_CASE = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
        SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768))
        SCREAMING_SNAKE_CASE = raw_image

    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        # Full DiffEdit round trip (mask -> invert -> edit) with DDIM schedulers.
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
        SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config)
        SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = 'a bowl of fruit'
        SCREAMING_SNAKE_CASE = 'a bowl of pears'
        SCREAMING_SNAKE_CASE = pipe.generate_mask(
            image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
        SCREAMING_SNAKE_CASE = pipe.invert(
            prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a).latents
        SCREAMING_SNAKE_CASE = pipe(
            prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
        SCREAMING_SNAKE_CASE = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png').resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5E-1

    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        # Same round trip with the DPM multistep scheduler pair and 25 steps.
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = 'a bowl of fruit'
        SCREAMING_SNAKE_CASE = 'a bowl of pears'
        SCREAMING_SNAKE_CASE = pipe.generate_mask(
            image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
        SCREAMING_SNAKE_CASE = pipe.invert(
            prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents
        SCREAMING_SNAKE_CASE = pipe(
            prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
        SCREAMING_SNAKE_CASE = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png').resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5E-1
73
1
# NOTE(review): machine-mangled dump of the Wav2Vec2 model configuration.
# Every __init__ parameter was renamed to `a` (duplicate parameter names —
# the def does not parse as-is) while the body still reads the original
# parameter names (hidden_size, feat_extract_norm, ...), and every assignment
# target became `SCREAMING_SNAKE_CASE` (originally `self.<name>`). Code kept
# token-identical; only comments/formatting added. Restore the real signature
# and `self.` targets from upstream transformers before use.
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : Tuple = logging.get_logger(__name__)

a_ : Optional[int] = {
    'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class _snake_case ( A__ ):
    # Model-type tag consumed by the Auto* factories.
    _lowercase : Union[str, Any] = '''wav2vec2'''

    def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="sum" , a=False , a=False , a=256 , a=(512, 512, 512, 512, 1500) , a=(5, 3, 3, 1, 1) , a=(1, 2, 3, 1, 1) , a=512 , a=0 , a=1 , a=2 , a=False , a=3 , a=2 , a=3 , a=None , a=None , **a , ) -> Tuple:
        super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
        # Transformer encoder and convolutional feature-extractor hyper-parameters.
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = feat_extract_norm
        SCREAMING_SNAKE_CASE = feat_extract_activation
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = conv_bias
        SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
        SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
        SCREAMING_SNAKE_CASE = len(self.conv_dim)
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = hidden_dropout
        SCREAMING_SNAKE_CASE = attention_dropout
        SCREAMING_SNAKE_CASE = activation_dropout
        SCREAMING_SNAKE_CASE = feat_proj_dropout
        SCREAMING_SNAKE_CASE = final_dropout
        SCREAMING_SNAKE_CASE = layerdrop
        SCREAMING_SNAKE_CASE = layer_norm_eps
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = do_stable_layer_norm
        SCREAMING_SNAKE_CASE = use_weighted_layer_sum
        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        SCREAMING_SNAKE_CASE = apply_spec_augment
        SCREAMING_SNAKE_CASE = mask_time_prob
        SCREAMING_SNAKE_CASE = mask_time_length
        SCREAMING_SNAKE_CASE = mask_time_min_masks
        SCREAMING_SNAKE_CASE = mask_feature_prob
        SCREAMING_SNAKE_CASE = mask_feature_length
        SCREAMING_SNAKE_CASE = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        SCREAMING_SNAKE_CASE = num_codevectors_per_group
        SCREAMING_SNAKE_CASE = num_codevector_groups
        SCREAMING_SNAKE_CASE = contrastive_logits_temperature
        SCREAMING_SNAKE_CASE = feat_quantizer_dropout
        SCREAMING_SNAKE_CASE = num_negatives
        SCREAMING_SNAKE_CASE = codevector_dim
        SCREAMING_SNAKE_CASE = proj_codevector_dim
        SCREAMING_SNAKE_CASE = diversity_loss_weight
        # ctc loss
        SCREAMING_SNAKE_CASE = ctc_loss_reduction
        SCREAMING_SNAKE_CASE = ctc_zero_infinity
        # adapter
        SCREAMING_SNAKE_CASE = add_adapter
        SCREAMING_SNAKE_CASE = adapter_kernel_size
        SCREAMING_SNAKE_CASE = adapter_stride
        SCREAMING_SNAKE_CASE = num_adapter_layers
        SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size
        SCREAMING_SNAKE_CASE = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        SCREAMING_SNAKE_CASE = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = xvector_output_dim

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
        # Product of the conv strides = total downsampling factor of the feature extractor.
        return functools.reduce(operator.mul , self.conv_stride , 1)
73
# NOTE(review): machine-mangled dump of the UniSpeech model configuration.
# All __init__ parameters were renamed to `a` (duplicate parameter names —
# the def does not parse as-is) while the body still reads the original names,
# and assignment targets (originally `self.<name>`) became
# `SCREAMING_SNAKE_CASE`. Code kept token-identical; only comments/formatting
# added. Restore from upstream transformers before use.
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : List[str] = logging.get_logger(__name__)

a_ : Any = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class _snake_case ( A__ ):
    # Model-type tag consumed by the Auto* factories.
    _lowercase : Optional[int] = '''unispeech'''

    def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="mean" , a=False , a=False , a=256 , a=80 , a=0 , a=1 , a=2 , a=0.5 , **a , ) -> Optional[int]:
        super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
        # Transformer encoder and convolutional feature-extractor hyper-parameters.
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = feat_extract_norm
        SCREAMING_SNAKE_CASE = feat_extract_activation
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = conv_bias
        SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
        SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
        SCREAMING_SNAKE_CASE = len(self.conv_dim)
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = hidden_dropout
        SCREAMING_SNAKE_CASE = attention_dropout
        SCREAMING_SNAKE_CASE = activation_dropout
        SCREAMING_SNAKE_CASE = feat_proj_dropout
        SCREAMING_SNAKE_CASE = final_dropout
        SCREAMING_SNAKE_CASE = layerdrop
        SCREAMING_SNAKE_CASE = layer_norm_eps
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = num_ctc_classes
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = do_stable_layer_norm
        SCREAMING_SNAKE_CASE = use_weighted_layer_sum
        SCREAMING_SNAKE_CASE = classifier_proj_size
        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        SCREAMING_SNAKE_CASE = apply_spec_augment
        SCREAMING_SNAKE_CASE = mask_time_prob
        SCREAMING_SNAKE_CASE = mask_time_length
        SCREAMING_SNAKE_CASE = mask_time_min_masks
        SCREAMING_SNAKE_CASE = mask_feature_prob
        SCREAMING_SNAKE_CASE = mask_feature_length
        SCREAMING_SNAKE_CASE = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        SCREAMING_SNAKE_CASE = num_codevectors_per_group
        SCREAMING_SNAKE_CASE = num_codevector_groups
        SCREAMING_SNAKE_CASE = contrastive_logits_temperature
        SCREAMING_SNAKE_CASE = feat_quantizer_dropout
        SCREAMING_SNAKE_CASE = num_negatives
        SCREAMING_SNAKE_CASE = codevector_dim
        SCREAMING_SNAKE_CASE = proj_codevector_dim
        SCREAMING_SNAKE_CASE = diversity_loss_weight
        # ctc loss
        SCREAMING_SNAKE_CASE = ctc_loss_reduction
        SCREAMING_SNAKE_CASE = ctc_zero_infinity
        # pretraining loss
        SCREAMING_SNAKE_CASE = replace_prob

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # Product of the conv strides = total downsampling factor of the feature extractor.
        return functools.reduce(operator.mul , self.conv_stride , 1)
73
1
from random import randint, random

# Nagel–Schreckenberg single-lane traffic cellular automaton.
# A cell holds -1 when empty, otherwise the speed of the car occupying it.
# NOTE: the dumped original defined every function as `lowerCamelCase__`
# (mutual shadowing) while the call sites used the real names below; the
# names are restored here so the module actually runs.


def construct_highway(
    number_of_cells,
    frequency,
    initial_speed,
    random_frequency=False,
    random_speed=False,
    max_speed=5,
):
    """Build the initial one-row highway.

    Cars are placed every `frequency` cells (or at random gaps when
    `random_frequency` is True) with `initial_speed` (or a random speed up to
    `max_speed` when `random_speed` is True).
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)  # negative speeds make no sense
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now, car_index):
    """Return the number of empty cells in front of the car at `car_index`.

    Wraps around: near the end of the highway the scan continues from cell 0.
    """
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now, probability, max_speed):
    """Apply one Nagel–Schreckenberg step (accelerate, brake, random slowdown).

    Speeds are updated in place-order but positions are NOT moved here.
    """
    number_of_cells = len(highway_now)
    # Before calculations, the next highway state is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway, number_of_update, probability, max_speed):
    """Run `number_of_update` steps, appending each new row to `highway`."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
import argparse import collections import json import os import re import string import sys import numpy as np a_ : Optional[Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE) a_ : List[str] = None def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.') parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.') parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.') parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).') parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.') parser.add_argument( '--na-prob-thresh' , '-t' , type=_UpperCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCAmelCase , help='Save precision-recall curves to directory.') parser.add_argument('--verbose' , '-v' , action='store_true') if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE = bool(qa['answers']['text']) return qid_to_has_ans def lowerCamelCase__ (_UpperCAmelCase): def remove_articles(_UpperCAmelCase): return ARTICLES_REGEX.sub(' ' , _UpperCAmelCase) def white_space_fix(_UpperCAmelCase): return " ".join(text.split()) def remove_punc(_UpperCAmelCase): SCREAMING_SNAKE_CASE = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(_UpperCAmelCase): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase)))) def lowerCamelCase__ (_UpperCAmelCase): if not s: return [] return normalize_answer(_UpperCAmelCase).split() def lowerCamelCase__ 
(_UpperCAmelCase , _UpperCAmelCase): return int(normalize_answer(_UpperCAmelCase) == normalize_answer(_UpperCAmelCase)) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase) SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase) SCREAMING_SNAKE_CASE = collections.Counter(_UpperCAmelCase) & collections.Counter(_UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(common.values()) if len(_UpperCAmelCase) == 0 or len(_UpperCAmelCase) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE = qa['id'] SCREAMING_SNAKE_CASE = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase)] if not gold_answers: # For unanswerable questions, only correct answer is empty string SCREAMING_SNAKE_CASE = [''] if qid not in preds: print(F'''Missing prediction for {qid}''') continue SCREAMING_SNAKE_CASE = preds[qid] # Take max over all gold answers SCREAMING_SNAKE_CASE = max(compute_exact(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers) SCREAMING_SNAKE_CASE = max(compute_fa(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers) return exact_scores, fa_scores def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = {} for qid, s in scores.items(): SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh if pred_na: SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid]) else: SCREAMING_SNAKE_CASE = s return new_scores def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase=None): if not qid_list: SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) return collections.OrderedDict( [ ('exact', 1_00.0 * sum(exact_scores.values()) / total), ('f1', 1_00.0 * sum(fa_scores.values()) / total), ('total', total), ]) else: SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) return collections.OrderedDict( [ ('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total), ('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list) / total), ('total', total), ]) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): for k in new_eval: SCREAMING_SNAKE_CASE = new_eval[k] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post') plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b') plt.xlabel('Recall') plt.ylabel('Precision') plt.xlim([0.0, 1.05]) plt.ylim([0.0, 1.05]) plt.title(_UpperCAmelCase) plt.savefig(_UpperCAmelCase) plt.clf() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None): SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k]) SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = 1.0 SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = [1.0] SCREAMING_SNAKE_CASE = [0.0] SCREAMING_SNAKE_CASE = 0.0 for i, qid in enumerate(_UpperCAmelCase): if qid_to_has_ans[qid]: true_pos += scores[qid] SCREAMING_SNAKE_CASE = true_pos / float(i + 1) SCREAMING_SNAKE_CASE = true_pos / float(_UpperCAmelCase) if i == len(_UpperCAmelCase) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_UpperCAmelCase) recalls.append(_UpperCAmelCase) if out_image: plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return {"ap": 1_00.0 * avg_prec} def 
lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if out_image_dir and not os.path.exists(_UpperCAmelCase): os.makedirs(_UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v) if num_true_pos == 0: return SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , ) SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , ) SCREAMING_SNAKE_CASE = {k: float(_UpperCAmelCase) for k, v in qid_to_has_ans.items()} SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)' , ) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact') merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1') merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if not qid_list: return SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list] SCREAMING_SNAKE_CASE = np.ones_like(_UpperCAmelCase) / float(len(_UpperCAmelCase)) plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0)) plt.xlabel('Model probability of no-answer') plt.ylabel('Proportion of dataset') plt.title(F'''Histogram of no-answer probability: {name}''') plt.savefig(os.path.join(_UpperCAmelCase , F'''na_prob_hist_{name}.png''')) plt.clf() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) SCREAMING_SNAKE_CASE = num_no_ans SCREAMING_SNAKE_CASE = cur_score SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k]) for i, qid in enumerate(_UpperCAmelCase): if qid not in scores: continue if qid_to_has_ans[qid]: SCREAMING_SNAKE_CASE = scores[qid] else: if preds[qid]: SCREAMING_SNAKE_CASE = -1 else: SCREAMING_SNAKE_CASE = 0 cur_score += diff if cur_score > best_score: SCREAMING_SNAKE_CASE = cur_score SCREAMING_SNAKE_CASE = na_probs[qid] return 1_00.0 * best_score / len(_UpperCAmelCase), best_thresh def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = best_exact SCREAMING_SNAKE_CASE = exact_thresh SCREAMING_SNAKE_CASE = best_fa 
SCREAMING_SNAKE_CASE = fa_thresh def lowerCamelCase__ (): with open(OPTS.data_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) SCREAMING_SNAKE_CASE = dataset_json['data'] with open(OPTS.pred_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) if OPTS.na_prob_file: with open(OPTS.na_prob_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) else: SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds} SCREAMING_SNAKE_CASE = make_qid_to_has_ans(_UpperCAmelCase) # maps qid to True/False SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v] SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh) SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh) SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase) if has_ans_qids: SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'HasAns') if no_ans_qids: SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns') if OPTS.na_prob_file: find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir) histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns') histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns') if OPTS.out_file: with open(OPTS.out_file , 'w') as f: json.dump(_UpperCAmelCase , 
_UpperCAmelCase) else: print(json.dumps(_UpperCAmelCase , indent=2)) if __name__ == "__main__": a_ : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
73
1
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


a_ : Tuple = logging.get_logger(__name__)


class _snake_case(BeitImageProcessor):
    """Deprecated alias of `BeitImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # The original signature `(self, *a, **a)` repeated one argument name
        # (a SyntaxError) and subclassed the undefined name `A__`; the base is
        # restored from the import/deprecation message. FutureWarning is the
        # category conventionally used for these shims -- confirm upstream.
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
73
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


a_ : Dict = logging.get_logger(__name__)


class _snake_case(GLPNImageProcessor):
    """Deprecated alias of `GLPNImageProcessor`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # The original signature `(self, *a, **a)` repeated one argument name
        # (a SyntaxError) and subclassed the undefined name `A__`; the base is
        # restored from the import/deprecation message. FutureWarning is the
        # category conventionally used for these shims -- confirm upstream.
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
73
1
# Standard acceleration due to gravity on Earth, in m/s^2.
g = 9.80_665
a_ : float = g  # original (mangled) module-level name, kept for compatibility


def lowerCamelCase__(fluid_density, volume, gravity=g):
    """Return the buoyant force via Archimedes' principle: rho * g * V.

    Args:
        fluid_density: density of the displacing fluid in kg/m^3 (must be > 0).
        volume: displaced volume in m^3 (must be >= 0).
        gravity: gravitational acceleration in m/s^2 (must be > 0).

    Returns:
        The buoyant force in newtons.

    Raises:
        ValueError: if any argument is outside its physical range.
    """
    # The original signature repeated one parameter name three times (a
    # SyntaxError) and defaulted to an undefined `g`; parameter names are
    # restored to the ones the body actually reads.
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density')
    if volume < 0:
        raise ValueError('Impossible Object volume')
    if gravity <= 0:
        raise ValueError('Impossible Gravity')
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
73
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class _snake_case(unittest.TestCase, A__):
    # NOTE(review): identifiers here look machine-mangled: the second base
    # class `A__` is undefined (presumably ToolTesterMixin), every method is
    # named `SCREAMING_SNAKE_CASE__` (later defs shadow earlier ones at class
    # creation), and the assignments below presumably targeted `self.tool` /
    # `self.remote_tool` originally -- confirm against the upstream test file.

    def SCREAMING_SNAKE_CASE__(self) -> List[str]:
        # Presumably the original setUp(): load local and remote variants of
        # the text-classification tool.
        SCREAMING_SNAKE_CASE = load_tool('text-classification')
        self.tool.setup()
        SCREAMING_SNAKE_CASE = load_tool('text-classification', remote=a)

    def SCREAMING_SNAKE_CASE__(self) -> str:
        # Positional-argument call on the local tool.
        SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(a, 'positive')

    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        # Positional-argument call on the remote tool.
        SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(a, 'positive')

    def SCREAMING_SNAKE_CASE__(self) -> int:
        # Keyword-argument call on the local tool.
        SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(a, 'positive')

    def SCREAMING_SNAKE_CASE__(self) -> List[str]:
        # Keyword-argument call on the remote tool.
        SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(a, 'positive')
73
1
"""Count token-id occurrences in a binarized MLM dataset and pickle the
per-id count vector (used to smooth masking probabilities, cf XLM/word2vec)."""
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=3_05_22, type=int)
    args = parser.parse_args()

    logger.info(f"""Loading data from {args.data_file}""")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense per-token-id count vector. The mangled original rebound a single
    # scalar name (`a_ = v`) instead of indexing, which discarded every count.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
73
"""Draw a Sierpinski triangle of a given recursion depth with turtle graphics."""
import sys


def get_mid(pa, pb):
    """Return the midpoint of two 2-D points.

    The mangled original duplicated one parameter name (a SyntaxError) and
    averaged a point with itself; restored to a true midpoint.
    """
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(vertexa, vertexa_a, vertexa_a_a, depth):
    """Draw the triangle given by three vertices, then recurse on the three
    corner sub-triangles until `depth` reaches 0.

    Parameter names were mangled to a single repeated identifier in the
    original (a SyntaxError); the drawing order below follows the goto calls.
    """
    # Outline the current triangle.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexa_a[0], vertexa_a[1])
    my_pen.goto(vertexa_a_a[0], vertexa_a_a[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    # Recurse on the three corner triangles formed by the edge midpoints.
    triangle(vertexa, get_mid(vertexa, vertexa_a), get_mid(vertexa, vertexa_a_a), depth - 1)
    triangle(vertexa_a, get_mid(vertexa, vertexa_a), get_mid(vertexa_a, vertexa_a_a), depth - 1)
    triangle(vertexa_a_a, get_mid(vertexa_a_a, vertexa_a), get_mid(vertexa, vertexa_a_a), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            'Correct format for using this script: '
            'python fractals.py <int:depth_for_fractal>'
        )
    # Imported lazily so the module can be imported on headless systems
    # (the turtle module requires tkinter/a display).
    import turtle

    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('red')

    vertices = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
73
1
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


# Module-level flag (mangled name); unused in the visible code.
a_ : str = False


class _snake_case(unittest.TestCase):
    # Intentionally empty placeholder TestCase.
    pass


@nightly
@require_torch_gpu
class _snake_case(unittest.TestCase):
    # NOTE(review): identifiers in this block are machine-mangled -- every
    # local binding is `SCREAMING_SNAKE_CASE`, all methods are named
    # `SCREAMING_SNAKE_CASE__`, and many call arguments are the bare name `a`,
    # so later reads of `pipe`, `generator`, `image`, `image_slice`, etc.
    # refer to names never bound as written. Indentation was reconstructed
    # from syntax; confirm against the original diffusers test module.

    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE__(self) -> Tuple:
        # Round-trip test: dual-guided output must be identical after a
        # save_pretrained / from_pretrained reload with the same seed.
        SCREAMING_SNAKE_CASE = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.floataa)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = pipe.dual_guided(
            prompt='first prompt', image=a, text_to_image_strength=0.75, generator=a, guidance_scale=7.5, num_inference_steps=2, output_type='numpy',
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(a)
            SCREAMING_SNAKE_CASE = VersatileDiffusionPipeline.from_pretrained(a, torch_dtype=torch.floataa)
            pipe.to(a)
            pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = generator.manual_seed(0)
        SCREAMING_SNAKE_CASE = pipe.dual_guided(
            prompt='first prompt', image=a, text_to_image_strength=0.75, generator=a, guidance_scale=7.5, num_inference_steps=2, output_type='numpy',
        ).images
        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"

    def SCREAMING_SNAKE_CASE__(self) -> Any:
        # End-to-end checks: dual-guided, text-to-image and image-variation
        # outputs compared against reference pixel slices.
        SCREAMING_SNAKE_CASE = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.floataa)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = 'cyberpunk 2077'
        SCREAMING_SNAKE_CASE = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = pipe.dual_guided(
            prompt=a, image=a, text_to_image_strength=0.75, generator=a, guidance_scale=7.5, num_inference_steps=50, output_type='numpy',
        ).images
        # Compare a 3x3 corner patch of the last channel against the
        # recorded reference slice.
        SCREAMING_SNAKE_CASE = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
        SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger '
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = pipe.text_to_image(
            prompt=a, generator=a, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        SCREAMING_SNAKE_CASE = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
        SCREAMING_SNAKE_CASE = pipe.image_variation(a, generator=a, output_type='numpy').images
        SCREAMING_SNAKE_CASE = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
73
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# NOTE(review): throughout this script local bindings were machine-mangled to
# the single name `SCREAMING_SNAKE_CASE`, every def to `lowerCamelCase__`, and
# most arguments to `_UpperCAmelCase`, so later reads of `accelerator`,
# `tokenizer`, `model`, `dataloader`, etc. refer to names never bound as
# written; indentation was reconstructed from syntax. Confirm against the
# original accelerate test script.
a_ : Any = 'true'


def lowerCamelCase__(_UpperCAmelCase, _UpperCAmelCase=82, _UpperCAmelCase=16):
    # Presumably get_basic_setup(accelerator, num_samples, batch_size):
    # build a regression model + dataloader and prepare them.
    set_seed(42)
    SCREAMING_SNAKE_CASE = RegressionModel()
    SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase, batch_size=_UpperCAmelCase)
    model.to(accelerator.device)
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase, _UpperCAmelCase)
    return model, ddp_model, dataloader


def lowerCamelCase__(_UpperCAmelCase, _UpperCAmelCase=False):
    # Presumably get_dataloader(accelerator, use_longest): tokenize the
    # GLUE/MRPC validation split and return a DataLoader over it.
    SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    SCREAMING_SNAKE_CASE = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(_UpperCAmelCase):
        SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'], examples['sentence2'], truncation=_UpperCAmelCase, max_length=_UpperCAmelCase)
        return outputs

    with accelerator.main_process_first():
        SCREAMING_SNAKE_CASE = dataset.map(
            _UpperCAmelCase, batched=_UpperCAmelCase, remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(_UpperCAmelCase):
        # Two padding strategies: dynamic ('longest') vs fixed 128 tokens.
        if use_longest:
            return tokenizer.pad(_UpperCAmelCase, padding='longest', return_tensors='pt')
        return tokenizer.pad(_UpperCAmelCase, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(_UpperCAmelCase, shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=16)


def lowerCamelCase__(_UpperCAmelCase, _UpperCAmelCase):
    # Presumably get_mrpc_setup(dispatch_batches, split_batches): return both
    # a DDP-prepared and an unprepared (model, dataloader, device) triple.
    SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase, split_batches=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase, not dispatch_batches)
    SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase, _UpperCAmelCase)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def lowerCamelCase__(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase):
    # Presumably generate_predictions(model, dataloader, accelerator):
    # forward every batch, gather logits/targets across processes, concat.
    SCREAMING_SNAKE_CASE = []
    for batch in dataloader:
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = batch.values()
        with torch.no_grad():
            SCREAMING_SNAKE_CASE = model(_UpperCAmelCase)
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = [], []
    for logit, targ in logits_and_targets:
        logits.append(_UpperCAmelCase)
        targs.append(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase)
    return logits, targs


def lowerCamelCase__(_UpperCAmelCase, _UpperCAmelCase=82, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=16):
    # Presumably test_torch_metrics: gather_for_metrics must yield exactly
    # num_samples inputs (no duplicated padding samples).
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
    assert (
        len(_UpperCAmelCase) == num_samples
    ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}'''


def lowerCamelCase__(_UpperCAmelCase=False, _UpperCAmelCase=False):
    # Presumably test_mrpc: metrics computed on the distributed setup must
    # match a single-process baseline.
    SCREAMING_SNAKE_CASE = evaluate.load('glue', 'mrpc')
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase, _UpperCAmelCase)
    # First do baseline
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = setup['no']
    model.to(_UpperCAmelCase)
    model.eval()
    for batch in dataloader:
        batch.to(_UpperCAmelCase)
        with torch.inference_mode():
            SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
            SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=_UpperCAmelCase, references=batch['labels'])
    SCREAMING_SNAKE_CASE = metric.compute()
    # Then do distributed
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
            SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
            SCREAMING_SNAKE_CASE = batch['labels']
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references))
            metric.add_batch(predictions=_UpperCAmelCase, references=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''


def lowerCamelCase__():
    # Presumably main(): run the gather_for_metrics and torch-metric checks
    # under every split/dispatch batching combination.
    SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase, dispatch_batches=_UpperCAmelCase)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(_UpperCAmelCase, _UpperCAmelCase)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase, dispatch_batches=_UpperCAmelCase)
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(_UpperCAmelCase, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    SCREAMING_SNAKE_CASE = Accelerator()
    test_torch_metrics(_UpperCAmelCase, 512)
    accelerator.state._reset_state()


def lowerCamelCase__(_UpperCAmelCase):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
73
1
from __future__ import annotations def lowerCamelCase__ (_UpperCAmelCase = 4): SCREAMING_SNAKE_CASE = abs(_UpperCAmelCase) or 4 return [[1 + x + y * row_size for x in range(_UpperCAmelCase)] for y in range(_UpperCAmelCase)] def lowerCamelCase__ (_UpperCAmelCase): return reverse_row(transpose(_UpperCAmelCase)) # OR.. transpose(reverse_column(matrix)) def lowerCamelCase__ (_UpperCAmelCase): return reverse_row(reverse_column(_UpperCAmelCase)) # OR.. reverse_column(reverse_row(matrix)) def lowerCamelCase__ (_UpperCAmelCase): return reverse_column(transpose(_UpperCAmelCase)) # OR.. transpose(reverse_row(matrix)) def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = [list(_UpperCAmelCase) for x in zip(*_UpperCAmelCase)] return matrix def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = matrix[::-1] return matrix def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = [x[::-1] for x in matrix] return matrix def lowerCamelCase__ (_UpperCAmelCase): for i in matrix: print(*_UpperCAmelCase) if __name__ == "__main__": a_ : Optional[Any] = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) a_ : Any = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) a_ : List[str] = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
73
"""Lazy-import module init for the GPT-Sw3 tokenizer (sentencepiece-gated)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# The mangled original assigned this structure to a throwaway name (`a_`)
# even though `_LazyModule` below reads `_import_structure`, and it dropped
# the dict-key assignment for the tokenizer module; both are restored here.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Module/class path restored from the export list above (the original
        # had the mangled `tokenization_gpt_swa` / `GPTSwaTokenizer`).
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with the lazy proxy, per the transformers pattern.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
1
"""Project Euler 234 (semidivisible numbers): sieve + paired-prime scan.

The mangled original rebound a single scalar name instead of indexing the
sieve array and appended the wrong variables; names are restored from the
references the bodies actually read.
"""
import math


def prime_sieve(n: int) -> list[int]:
    """Return all primes below `n` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    # Only odd candidates need checking; evens > 2 were never marked prime-able.
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 9999_6666_3333) -> int:
    """Sum the semidivisible numbers not exceeding `limit`.

    A number n is semidivisible when exactly one of lps(n) (largest prime
    <= sqrt(n)) and ups(n) (smallest prime >= sqrt(n)) divides it. For each
    consecutive prime pair (p, q) the candidates lie in (p^2, q^2).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    print(solution())
73
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


# Datasets mirrored on the HF GCP bucket that the parameterized test below covers.
a_ : str = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]


def lowerCamelCase__(_UpperCAmelCase=True):
    # Build absl named test parameters, optionally carrying the config name.
    # NOTE(review): the parameter is mangled; the body reads `with_config`
    # and `DATASETS_ON_HF_GCP` (presumably the list bound to `a_` above).
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__))
class _snake_case(A__):
    # NOTE(review): the base class `A__` is undefined here (presumably
    # TestCase) and the method below has a duplicated parameter name
    # (`self, a, a`), which is a SyntaxError as written -- machine-mangled;
    # confirm against the original datasets test file.

    # Parameterized placeholders for the dataset name and config name.
    _lowercase : Optional[Any] = None
    _lowercase : Optional[Any] = None

    def SCREAMING_SNAKE_CASE__(self, a, a) -> Optional[Any]:
        # Check that the dataset-info file for this dataset/config is
        # reachable at its HF GCP URL.
        with TemporaryDirectory() as tmp_dir:
            SCREAMING_SNAKE_CASE = dataset_module_factory(a, cache_dir=a)
            SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path, dataset=a)
            SCREAMING_SNAKE_CASE = builder_cls(
                cache_dir=a, config_name=a, hash=dataset_module.hash,
            )
            SCREAMING_SNAKE_CASE = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=a).replace(os.sep, '/'),
                    config.DATASET_INFO_FILENAME,
                ])
            SCREAMING_SNAKE_CASE = cached_path(a, cache_dir=a)
            self.assertTrue(os.path.exists(a))


@pytest.mark.integration
def lowerCamelCase__(_UpperCAmelCase):
    # Wikipedia (simple) can be built as a regular dataset from HF cloud
    # storage instead of the apache-beam download_and_prepare path.
    SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia', cache_dir=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path)
    SCREAMING_SNAKE_CASE = builder_cls(
        cache_dir=_UpperCAmelCase, config_name='20220301.frr', hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    SCREAMING_SNAKE_CASE = None
    builder_instance.download_and_prepare()
    SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def lowerCamelCase__(_UpperCAmelCase):
    # The same dataset must also stream as an IterableDataset(Dict).
    SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia', cache_dir=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path, dataset=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = builder_cls(
        cache_dir=_UpperCAmelCase, config_name='20220301.frr', hash=dataset_module.hash,
    )
    SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(_UpperCAmelCase, _UpperCAmelCase)
    assert "train" in ds
    assert isinstance(ds['train'], _UpperCAmelCase)
    assert next(iter(ds['train']))
73
1
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

# NOTE(review): local bindings in this script were machine-mangled to
# `SCREAMING_SNAKE_CASE` and arguments to `_UpperCAmelCase`, so reads of
# `tokenizer`, `datasets`, `model`, `optimizer`, etc. refer to names never
# bound as written; indentation was reconstructed. Confirm against the
# original accelerate nlp_example.py.

# Presumably MAX_GPU_BATCH_SIZE and EVAL_BATCH_SIZE in the original.
a_ : Optional[int] = 16
a_ : Any = 32


def lowerCamelCase__(_UpperCAmelCase, _UpperCAmelCase=16):
    # Presumably get_dataloaders(accelerator, batch_size): tokenize GLUE/MRPC
    # and return train/eval dataloaders with accelerator-aware padding.
    SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('bert-base-cased')
    SCREAMING_SNAKE_CASE = load_dataset('glue', 'mrpc')

    def tokenize_function(_UpperCAmelCase):
        # max_length=None => use the model max length (it's actually the default)
        SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'], examples['sentence2'], truncation=_UpperCAmelCase, max_length=_UpperCAmelCase)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        SCREAMING_SNAKE_CASE = datasets.map(
            _UpperCAmelCase, batched=_UpperCAmelCase, remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(_UpperCAmelCase):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            SCREAMING_SNAKE_CASE = 16
        elif accelerator.mixed_precision != "no":
            SCREAMING_SNAKE_CASE = 8
        else:
            SCREAMING_SNAKE_CASE = None
        return tokenizer.pad(
            _UpperCAmelCase, padding='longest', max_length=_UpperCAmelCase, pad_to_multiple_of=_UpperCAmelCase, return_tensors='pt',
        )

    # Instantiate dataloaders.
    SCREAMING_SNAKE_CASE = DataLoader(
        tokenized_datasets['train'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase, drop_last=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = DataLoader(
        tokenized_datasets['validation'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase, drop_last=(accelerator.mixed_precision == 'fp8'),
    )
    return train_dataloader, eval_dataloader


def lowerCamelCase__(_UpperCAmelCase, _UpperCAmelCase):
    # Presumably training_function(config, args): full train + eval loop.
    # Initialize accelerator
    SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    SCREAMING_SNAKE_CASE = config['lr']
    SCREAMING_SNAKE_CASE = int(config['num_epochs'])
    SCREAMING_SNAKE_CASE = int(config['seed'])
    SCREAMING_SNAKE_CASE = int(config['batch_size'])
    SCREAMING_SNAKE_CASE = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    SCREAMING_SNAKE_CASE = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        SCREAMING_SNAKE_CASE = batch_size // MAX_GPU_BATCH_SIZE
        SCREAMING_SNAKE_CASE = MAX_GPU_BATCH_SIZE

    set_seed(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_dataloaders(_UpperCAmelCase, _UpperCAmelCase)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=_UpperCAmelCase)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    SCREAMING_SNAKE_CASE = model.to(accelerator.device)

    # Instantiate optimizer
    SCREAMING_SNAKE_CASE = AdamW(params=model.parameters(), lr=_UpperCAmelCase)

    # Instantiate scheduler
    SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
        optimizer=_UpperCAmelCase, num_warmup_steps=100, num_training_steps=(len(_UpperCAmelCase) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(
        _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)

    # Now we train the model
    for epoch in range(_UpperCAmelCase):
        model.train()
        for step, batch in enumerate(_UpperCAmelCase):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
            SCREAMING_SNAKE_CASE = outputs.loss
            SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
            accelerator.backward(_UpperCAmelCase)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(_UpperCAmelCase):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
            SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
            SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=_UpperCAmelCase, references=_UpperCAmelCase,
            )

        SCREAMING_SNAKE_CASE = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', _UpperCAmelCase)


def lowerCamelCase__():
    # Presumably main(): parse CLI args and launch training.
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=_UpperCAmelCase, default=_UpperCAmelCase, choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    SCREAMING_SNAKE_CASE = parser.parse_args()
    SCREAMING_SNAKE_CASE = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(_UpperCAmelCase, _UpperCAmelCase)


if __name__ == "__main__":
    main()
73
from __future__ import annotations


def lowerCamelCase__(n: int) -> list[int]:
    """Return the prime factorization of `n` in non-decreasing order.

    Trial division: divide out each factor `i` as long as it divides; any
    remainder > 1 at the end is itself prime. The mangled original appended
    the (shrinking) argument instead of the current divisor, producing wrong
    factor lists.

    Examples:
        12 -> [2, 2, 3];  1 -> [];  13 -> [13]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        # Whatever is left is a prime factor larger than sqrt(original n).
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipaConfig,
    BlipaForConditionalGeneration,
    BlipaProcessor,
    BlipaVisionConfig,
    BlipImageProcessor,
    OPTConfig,
    TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    """Download and return the demo image used to sanity-check the conversion."""
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    """Return (original_key, hf_key) pairs mapping LAVIS state-dict names to HF names."""
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move the value stored under ``old`` to the key ``new`` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    """Recombine the separate q/v biases into the fused qkv bias HF expects.

    LAVIS stores q_bias and v_bias separately (k has no bias); HF uses a single
    qkv bias, so the k slot is filled with zeros.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blipa_config(model_name, eos_token_id=None):
    """Build the HF config for ``model_name`` and return (config, image_size)."""
    # COCO-finetuned checkpoints use a larger input resolution
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS BLIP-2 checkpoint to the HF format, verify it, and optionally save/push it."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    original_model_name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=original_model_name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        # previously this expected slice was computed but never checked
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
73
import math
import os
import sys


def read_file_binary(file_path):
    """Read ``file_path`` and return its contents as a string of '0'/'1' bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon, curr_string, index, last_match_id):
    """Replace ``curr_string`` by its two extensions in the lexicon (in place).

    When the lexicon crosses a power-of-two size, every code word is widened
    by a leading '0' so all codes keep the same length.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits):
    """Compress ``data_bits`` using the Lempel-Ziv algorithm and return the result bits."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    # flush a trailing partial match by padding with '0' until a code word matches
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path, compressed):
    """Prepend the original file length (self-delimiting binary) to ``compressed``."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path, to_write):
    """Write the bit string ``to_write`` to ``file_path``, padded out to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            # pad the last byte with a '1' marker followed by zeros so the
            # padding can be removed unambiguously on decompression
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path, destination_path):
    """Read ``source_path``, compress it, and write the result to ``destination_path``."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
73
1
import numpy as np


def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Estimate the largest eigenvalue (in magnitude) of ``input_matrix``.

    Repeatedly applies the matrix to ``vector`` and normalizes, using the
    Rayleigh quotient as the eigenvalue estimate. For complex input the
    matrix must be Hermitian so eigenvalues are real.

    Returns (eigenvalue, eigenvector).
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    """Cross-check power_iteration against numpy.linalg.eigh on a real and a Hermitian matrix."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    # subtracting the transpose keeps the matrix Hermitian
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
73
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis (subtracts the row max)."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    # Defaults; both can be overridden through pipeline kwargs or the model config.
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
73
1
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    """Map-style dataset that applies a preprocess function lazily per item."""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    """Iterates a loader, runs `infer` on each batch and optionally unrolls batches item by item."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current item of the stored batch, shaped as if batch_size=1."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    """Flattens an iterator whose `infer` itself yields several sub-items per input."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    """Regroups flattened items back into their original `process` boundaries via `is_last`."""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    """Projects a dataset of dicts onto a single key."""

    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    """Projects a dataset of dicts onto a (text, text_pair) pair of keys."""

    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
73
# Prim's minimum-spanning-tree algorithm, list-based and heap-based variants.
# NOTE(review): this file was name-mangled — every assignment target became
# `SCREAMING_SNAKE_CASE` and several signatures repeat a parameter name, which is a
# SyntaxError. Reads of `id_`, `other`, `weight`, `graph`, `a`, `b`, `q`, `u`, `h`
# below are unresolved as written; the original targets must be restored.
import heapq as hq
import math
from collections.abc import Iterator


class _snake_case :
    # Graph vertex: id, predecessor (pi), key (tentative distance), neighbor list,
    # and an edge-weight map — presumably {vertex_id: weight}; verify on restore.

    def __init__( self , a) -> Optional[Any]:
        SCREAMING_SNAKE_CASE = str(id_)
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = {}  # {vertex:distance}

    def __lt__( self , a) -> Dict:
        # Orders vertices by key so min()/heapq can pick the closest vertex.
        # NOTE(review): parameter is `a` but the body reads `other` — unresolved.
        return self.key < other.key

    def __repr__( self) -> Optional[Any]:
        return self.id

    def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
        # Register an adjacent vertex.
        self.neighbors.append(a)

    def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Tuple:
        # Record the weight of the edge to a neighbor.
        # NOTE(review): duplicate parameter name `a` — SyntaxError as written.
        SCREAMING_SNAKE_CASE = weight


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
    # Connect vertices a and b (1-indexed) with an undirected weighted edge.
    # NOTE(review): all four parameters share one name — SyntaxError; body reads
    # `graph`, `a`, `b` which are unbound here.
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    graph[a - 1].add_edge(graph[b - 1] , _UpperCAmelCase)
    graph[b - 1].add_edge(graph[a - 1] , _UpperCAmelCase)


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    # Prim's MST, O(V^2) variant using min() over a worklist; returns a list of
    # (child, parent) 1-indexed id pairs.
    SCREAMING_SNAKE_CASE = []
    for u in graph:
        SCREAMING_SNAKE_CASE = math.inf
        SCREAMING_SNAKE_CASE = None
    SCREAMING_SNAKE_CASE = 0
    SCREAMING_SNAKE_CASE = graph[:]
    while q:
        SCREAMING_SNAKE_CASE = min(_UpperCAmelCase)
        q.remove(_UpperCAmelCase)
        for v in u.neighbors:
            # Relax edge (u, v) if v is still in the worklist.
            if (v in q) and (u.edges[v.id] < v.key):
                SCREAMING_SNAKE_CASE = u
                SCREAMING_SNAKE_CASE = u.edges[v.id]
    for i in range(1 , len(_UpperCAmelCase)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    # Heap-based Prim variant; yields (child, parent) pairs lazily.
    # NOTE(review): re-heapifying after each relaxation is O(V) per edge — the
    # original presumably accepted that cost for simplicity.
    for u in graph:
        SCREAMING_SNAKE_CASE = math.inf
        SCREAMING_SNAKE_CASE = None
    SCREAMING_SNAKE_CASE = 0
    SCREAMING_SNAKE_CASE = list(_UpperCAmelCase)
    hq.heapify(_UpperCAmelCase)
    while h:
        SCREAMING_SNAKE_CASE = hq.heappop(_UpperCAmelCase)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                SCREAMING_SNAKE_CASE = u
                SCREAMING_SNAKE_CASE = u.edges[v.id]
                hq.heapify(_UpperCAmelCase)
    for i in range(1 , len(_UpperCAmelCase)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def lowerCamelCase__ ():
    # Placeholder test hook; intentionally empty.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
# One-off conversion script: loads hopper-medium-v2 diffuser checkpoints from a
# hard-coded local path and re-saves them in diffusers UNet format under hub/.
# NOTE(review): name-mangled — every assignment target is `SCREAMING_SNAKE_CASE`,
# so reads of `hor`, `model`, `state_dict`, `hf_value_function`, `mapping`,
# `down_block_types` etc. are unresolved as written. `UNetaDModel` is presumably
# a mangled `UNet1DModel` — confirm against diffusers.
import json
import os

import torch

from diffusers import UNetaDModel


os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)


def lowerCamelCase__ (_UpperCAmelCase):
    # Convert the temporal UNet checkpoint for horizon `hor` (32 or 128).
    # NOTE(review): any other horizon leaves the block-type locals undefined.
    if hor == 128:
        SCREAMING_SNAKE_CASE = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        SCREAMING_SNAKE_CASE = (32, 128, 256)
        SCREAMING_SNAKE_CASE = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        SCREAMING_SNAKE_CASE = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        SCREAMING_SNAKE_CASE = (32, 64, 128, 256)
        SCREAMING_SNAKE_CASE = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    SCREAMING_SNAKE_CASE = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''')
    SCREAMING_SNAKE_CASE = model.state_dict()
    SCREAMING_SNAKE_CASE = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 6_5536,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    SCREAMING_SNAKE_CASE = UNetaDModel(**_UpperCAmelCase)
    print(F'''length of state dict: {len(state_dict.keys())}''')
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
    # Remap old checkpoint keys onto the new model's keys positionally —
    # assumes both state dicts enumerate parameters in the same order.
    SCREAMING_SNAKE_CASE = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        SCREAMING_SNAKE_CASE = state_dict.pop(_UpperCAmelCase)
    hf_value_function.load_state_dict(_UpperCAmelCase)
    torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''')
    with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w') as f:
        json.dump(_UpperCAmelCase , _UpperCAmelCase)


def lowerCamelCase__ ():
    # Convert the value-function checkpoint (fixed horizon 32).
    SCREAMING_SNAKE_CASE = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 128, 256),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 6_5536,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    SCREAMING_SNAKE_CASE = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    SCREAMING_SNAKE_CASE = model
    SCREAMING_SNAKE_CASE = UNetaDModel(**_UpperCAmelCase)
    print(F'''length of state dict: {len(state_dict.keys())}''')
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
    SCREAMING_SNAKE_CASE = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        SCREAMING_SNAKE_CASE = state_dict.pop(_UpperCAmelCase)
    hf_value_function.load_state_dict(_UpperCAmelCase)
    torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json' , 'w') as f:
        json.dump(_UpperCAmelCase , _UpperCAmelCase)


if __name__ == "__main__":
    # NOTE(review): `unet` / `value_function` are the original function names;
    # after mangling both defs above are `lowerCamelCase__`, so these calls are
    # unresolved as written.
    unet(32)
    # unet(128)
    value_function()
73
# Lazy-module __init__ for Mask2Former: declares the import structure, guards the
# vision/torch-dependent pieces behind availability checks, and installs a
# _LazyModule at runtime.
# NOTE(review): the module-level names were mangled to a single `a_` — each
# assignment overwrites the previous one, and the `_import_structure` read on the
# last line is unresolved. The original distinct names must be restored.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


a_ : Optional[Any] = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Union[str, Any] = ['Mask2FormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_maskaformer import MaskaFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskaformer import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskaFormerForUniversalSegmentation,
            MaskaFormerModel,
            MaskaFormerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
73
1
# Unit tests for summarization helpers (truncate_or_pad, process_story, build_mask,
# compute_token_type_ids).
# NOTE(review): name-mangled — local targets became `SCREAMING_SNAKE_CASE` and call
# arguments became the bare name `a`, so `self.block_size` is never set and `a` is
# unbound in every assertion. Original locals must be restored before these run.
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class _snake_case ( unittest.TestCase ):

    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        # setUp: block size used by the truncate/pad tests (target name lost).
        SCREAMING_SNAKE_CASE = 10

    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        # Short sequence is right-padded with 0 up to block_size.
        SCREAMING_SNAKE_CASE = [1, 2, 3, 4]
        SCREAMING_SNAKE_CASE = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(a , self.block_size , 0) , a)

    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        # Exact-length sequence passes through unchanged.
        SCREAMING_SNAKE_CASE = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        SCREAMING_SNAKE_CASE = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(a , self.block_size , 0) , a)

    def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
        # Over-length sequence is truncated to block_size.
        SCREAMING_SNAKE_CASE = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        SCREAMING_SNAKE_CASE = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(a , self.block_size , 0) , a)

    def SCREAMING_SNAKE_CASE__ ( self) -> Any:
        # A story without an @highlight section yields an empty summary list.
        SCREAMING_SNAKE_CASE = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = process_story(a)
        self.assertEqual(a , [])

    def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
        # Empty input yields empty story lines and empty summary lines.
        SCREAMING_SNAKE_CASE = ''
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = process_story(a)
        self.assertEqual(a , [])
        self.assertEqual(a , [])

    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # Story + @highlight splits into story sentences and summary sentences.
        SCREAMING_SNAKE_CASE = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = process_story(a)
        SCREAMING_SNAKE_CASE = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(a , a)
        SCREAMING_SNAKE_CASE = ['It was the best of times.']
        self.assertEqual(a , a)

    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        # No padding token present -> mask is all ones.
        SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3, 4])
        SCREAMING_SNAKE_CASE = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(a , 0).numpy() , expected.numpy())

    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # Trailing pad id 23 -> zeros in the mask tail.
        SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        SCREAMING_SNAKE_CASE = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(a , 23).numpy() , expected.numpy())

    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # Pad id 1 appearing only as trailing padding is masked out.
        SCREAMING_SNAKE_CASE = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        SCREAMING_SNAKE_CASE = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(a , 1).numpy() , expected.numpy())

    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        # Token-type ids flip at each separator (101) occurrence.
        SCREAMING_SNAKE_CASE = 101
        SCREAMING_SNAKE_CASE = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        SCREAMING_SNAKE_CASE = compute_token_type_ids(a , a)
        np.testing.assert_array_equal(a , a)
73
# Configuration class for the Decision Transformer model (GPT-2-style backbone
# plus state/action/return conditioning).
# NOTE(review): `__init__` below repeats the parameter name `a` for every argument —
# a SyntaxError as written — and every `self.<attr> = ...` target was mangled to a
# bare `SCREAMING_SNAKE_CASE`, so the keyword names read on the right-hand sides
# are the only record of the intended attributes.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : Dict = logging.get_logger(__name__)

a_ : Union[str, Any] = {
    'edbeeching/decision-transformer-gym-hopper-medium': (
        'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class _snake_case ( A__ ):
    # model_type and attribute aliases expected by the transformers config machinery.
    _lowercase : Optional[Any] = '''decision_transformer'''
    _lowercase : str = ['''past_key_values''']
    _lowercase : Union[str, Any] = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self , a=17 , a=4 , a=128 , a=4096 , a=True , a=1 , a=1024 , a=3 , a=1 , a=None , a="relu" , a=0.1 , a=0.1 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=True , a=5_0256 , a=5_0256 , a=False , a=False , **a , ) -> List[str]:
        # Store model hyperparameters; see right-hand-side names for intent.
        SCREAMING_SNAKE_CASE = state_dim
        SCREAMING_SNAKE_CASE = act_dim
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = max_ep_len
        SCREAMING_SNAKE_CASE = action_tanh
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = n_positions
        SCREAMING_SNAKE_CASE = n_layer
        SCREAMING_SNAKE_CASE = n_head
        SCREAMING_SNAKE_CASE = n_inner
        SCREAMING_SNAKE_CASE = activation_function
        SCREAMING_SNAKE_CASE = resid_pdrop
        SCREAMING_SNAKE_CASE = embd_pdrop
        SCREAMING_SNAKE_CASE = attn_pdrop
        SCREAMING_SNAKE_CASE = layer_norm_epsilon
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = scale_attn_weights
        SCREAMING_SNAKE_CASE = use_cache
        SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
        SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
        SCREAMING_SNAKE_CASE = bos_token_id
        SCREAMING_SNAKE_CASE = eos_token_id
        super().__init__(bos_token_id=a , eos_token_id=a , **a)
73
1
# Property-style pytest suite comparing a custom HashMap against dict under the same
# operation sequences.
# NOTE(review): name-mangled — the operation-list constants all became `a_` (each
# overwriting the last) while the parametrize block still references the original
# names (`_add_items`, `_overwrite_items`, ...); the `_get`/`_set`/`_del` helper
# names and loop locals (`k`, `v`, `fun`, `my`, `py`, ...) are likewise unresolved.
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def lowerCamelCase__ (_UpperCAmelCase):
    # _get(k): build a (getitem, k) operation tuple. NOTE(review): body reads `k`,
    # parameter is `_UpperCAmelCase` — unresolved as written.
    return getitem, k


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    # _set(k, v): build a (setitem, k, v) operation tuple.
    return setitem, k, v


def lowerCamelCase__ (_UpperCAmelCase):
    # _del(k): build a (delitem, k) operation tuple.
    return delitem, k


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase):
    # _run_operation(obj, fun, *args): apply fun and capture either the result or
    # the raised exception, never both.
    try:
        return fun(_UpperCAmelCase , *_UpperCAmelCase), None
    except Exception as e:
        return None, e


a_ : Dict = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
a_ : Any = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
a_ : Any = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
a_ : str = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
a_ : Optional[int] = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
a_ : Union[str, Any] = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]


@pytest.mark.parametrize(
    'operations' ,
    (
        pytest.param(_add_items , id='add items'),
        pytest.param(_overwrite_items , id='overwrite items'),
        pytest.param(_delete_items , id='delete items'),
        pytest.param(_access_absent_items , id='access absent items'),
        pytest.param(_add_with_resize_up , id='add with resize up'),
        pytest.param(_add_with_resize_down , id='add with resize down'),
    ) , )
def lowerCamelCase__ (_UpperCAmelCase):
    # Run each operation against both a HashMap and a plain dict and assert the two
    # observable states stay identical at every step.
    SCREAMING_SNAKE_CASE = HashMap(initial_block_size=4)
    SCREAMING_SNAKE_CASE = {}
    for _, (fun, *args) in enumerate(_UpperCAmelCase):
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = _run_operation(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase)
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = _run_operation(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase)
        assert my_res == py_res
        assert str(_UpperCAmelCase) == str(_UpperCAmelCase)
        assert set(_UpperCAmelCase) == set(_UpperCAmelCase)
        assert len(_UpperCAmelCase) == len(_UpperCAmelCase)
        assert set(my.items()) == set(py.items())


def lowerCamelCase__ ():
    # Sanity check: HashMap exposes no extra public names beyond dict's surface.
    def is_public(_UpperCAmelCase) -> bool:
        return not name.startswith('_')

    SCREAMING_SNAKE_CASE = {name for name in dir({}) if is_public(_UpperCAmelCase)}
    SCREAMING_SNAKE_CASE = {name for name in dir(HashMap()) if is_public(_UpperCAmelCase)}
    assert dict_public_names > hash_public_names
73
# Accelerate example: fine-tune bert-base-cased on GLUE MRPC; works on CPU, single
# or multi GPU, TPU, and mixed precision with the same script.
# NOTE(review): name-mangled — every assignment target is `SCREAMING_SNAKE_CASE`,
# so reads of `tokenizer`, `datasets`, `accelerator`, `model`, `optimizer`,
# `lr_scheduler`, `outputs`, `loss`, `args`, `config`, `parser`, and the
# `get_dataloaders`/`training_function`/`main` call targets are unresolved as
# written. Structure and right-hand sides preserve the original intent.
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


a_ : Optional[int] = 16  # presumably MAX_GPU_BATCH_SIZE — verify on restore
a_ : Any = 32  # presumably EVAL_BATCH_SIZE — verify on restore


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = 16):
    # get_dataloaders(accelerator, batch_size): tokenize MRPC and build train/eval
    # DataLoaders with dynamic padding.
    SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('bert-base-cased')
    SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc')

    def tokenize_function(_UpperCAmelCase):
        # max_length=None => use the model max length (it's actually the default)
        SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
        return outputs

    # Tokenize on the main process first so the cache is shared.
    with accelerator.main_process_first():
        SCREAMING_SNAKE_CASE = datasets.map(
            _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )

    # Models expect the label column to be named "labels".
    SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')

    def collate_fn(_UpperCAmelCase):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            SCREAMING_SNAKE_CASE = 16
        elif accelerator.mixed_precision != "no":
            SCREAMING_SNAKE_CASE = 8
        else:
            SCREAMING_SNAKE_CASE = None
        return tokenizer.pad(
            _UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )

    # Instantiate dataloaders.
    SCREAMING_SNAKE_CASE = DataLoader(
        tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = DataLoader(
        tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=(accelerator.mixed_precision == 'fp8') , )
    return train_dataloader, eval_dataloader


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    # training_function(config, args): full train/eval loop under Accelerate.
    SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
    # Hyper-parameters from the config dict.
    SCREAMING_SNAKE_CASE = config['lr']
    SCREAMING_SNAKE_CASE = int(config['num_epochs'])
    SCREAMING_SNAKE_CASE = int(config['seed'])
    SCREAMING_SNAKE_CASE = int(config['batch_size'])
    SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
    # If the batch size is too big we use gradient accumulation.
    SCREAMING_SNAKE_CASE = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        SCREAMING_SNAKE_CASE = batch_size // MAX_GPU_BATCH_SIZE
        SCREAMING_SNAKE_CASE = MAX_GPU_BATCH_SIZE
    set_seed(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase)
    # Build the model after seeding so weight init is reproducible.
    SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase)
    # Must happen before optimizer creation for TPU correctness.
    SCREAMING_SNAKE_CASE = model.to(accelerator.device)
    SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
        optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase) * num_epochs) // gradient_accumulation_steps , )
    # accelerator.prepare returns objects in the same order they were passed.
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
    for epoch in range(_UpperCAmelCase):
        model.train()
        for step, batch in enumerate(_UpperCAmelCase):
            batch.to(accelerator.device)
            SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
            SCREAMING_SNAKE_CASE = outputs.loss
            SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
            accelerator.backward(_UpperCAmelCase)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(_UpperCAmelCase):
            batch.to(accelerator.device)
            with torch.no_grad():
                SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
            SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
        SCREAMING_SNAKE_CASE = metric.compute()
        # Print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , _UpperCAmelCase)


def lowerCamelCase__ ():
    # main(): parse CLI flags and launch training with fixed hyper-parameters.
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
    SCREAMING_SNAKE_CASE = parser.parse_args()
    SCREAMING_SNAKE_CASE = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(_UpperCAmelCase , _UpperCAmelCase)


if __name__ == "__main__":
    main()
73
1
"""Deprecated alias for the GLPN image processor.

Kept so existing imports keep working; it emits a deprecation warning and then
behaves exactly like ``GLPNImageProcessor``.
"""
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


a_ : Dict = logging.get_logger(__name__)


class _snake_case ( A__ ):
    def __init__( self , *args , **kwargs) -> None:
        # Bug fix: the signature previously read `(*a, **a)`, which duplicates the
        # name `a` (a SyntaxError), and passed the unbound `a` as the warning
        # category. Use distinct names and FutureWarning (the conventional
        # category for transformers deprecation shims — confirm against the
        # surrounding package if it uses a custom category).
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        # Delegate all construction to the replacement image processor.
        super().__init__(*args , **kwargs)
73
# Lazy-module __init__ for RAG: declares the import structure and guards the
# torch/TF model classes behind availability checks.
# NOTE(review): the module-level names were mangled to a single `a_` — each
# assignment overwrites the previous one, and the `_import_structure` read on the
# last line is unresolved as written.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


a_ : int = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        'RagModel',
        'RagPreTrainedModel',
        'RagSequenceForGeneration',
        'RagTokenForGeneration',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Tuple = [
        'TFRagModel',
        'TFRagPreTrainedModel',
        'TFRagSequenceForGeneration',
        'TFRagTokenForGeneration',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
1
# Scrape worldometers.info for global COVID-19 totals and print them.
# NOTE(review): name-mangled — the namedtuple type and the format string both
# became `a_` (the second overwrites the first), the function's local XPath string
# target was lost, and `covid_data`/`fmt`/`covid_stats` are unresolved as written.
# The xpath call also passes `_UpperCAmelCase` (the URL) where the XPath string was
# presumably intended.
from collections import namedtuple

import requests
from lxml import html  # type: ignore


a_ : int = namedtuple('covid_data', 'cases deaths recovered')


def lowerCamelCase__ (_UpperCAmelCase = "https://www.worldometers.info/coronavirus/"):
    # Fetch the page and extract the three "maincounter" values.
    SCREAMING_SNAKE_CASE = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(_UpperCAmelCase).content).xpath(_UpperCAmelCase))


a_ : int = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
73
# 2D statics: convert polar forces to Cartesian components and check whether a
# system of forces at given locations is in static equilibrium (net moment ~ 0).
# NOTE(review): name-mangled — both function signatures repeat `_UpperCAmelCase`
# (a SyntaxError as written), bodies read `radian_mode`/`magnitude`/`eps` that the
# mangled parameters no longer bind, and the __main__ section reads
# `polar_force`/`in_static_equilibrium`/`forces`/`location` which the `a_`
# assignments no longer define.
from __future__ import annotations

from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False):
    # polar_force(magnitude, angle, radian_mode=False) -> [Fx, Fy].
    if radian_mode:
        return [magnitude * cos(_UpperCAmelCase), magnitude * sin(_UpperCAmelCase)]
    return [magnitude * cos(radians(_UpperCAmelCase)), magnitude * sin(radians(_UpperCAmelCase))]


def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10**-1):
    # in_static_equilibrium(forces, location, eps): sum of moments (cross products)
    # must be within eps of zero.
    SCREAMING_SNAKE_CASE = cross(_UpperCAmelCase , _UpperCAmelCase)
    SCREAMING_SNAKE_CASE = sum(_UpperCAmelCase)
    return abs(_UpperCAmelCase) < eps


if __name__ == "__main__":
    # Test to check if it works
    a_ : int = array(
        [
            polar_force(718.4, 1_80 - 30),
            polar_force(879.54, 45),
            polar_force(1_00, -90),
        ]
    )
    a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    a_ : Dict = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(2_15, 1_80 - 45),
            polar_force(2_64, 90 - 30),
        ]
    )
    a_ : Any = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    a_ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
    a_ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
73
1
"""Sigmoid and SiLU (swish) activation functions.

Bug fix: both function bodies previously referenced unbound names (`vector`,
`sigmoid`) and both functions shared the name ``lowerCamelCase__`` so the first
binding was unreachable. The helper is now named ``sigmoid`` (restoring the name
the second function already calls) and both bodies use their actual parameter.
The module's final public name ``lowerCamelCase__`` (the SiLU) is unchanged.
"""
import numpy as np


def sigmoid(_UpperCAmelCase):
    """Return the logistic sigmoid 1 / (1 + e^-x), elementwise for arrays."""
    return 1 / (1 + np.exp(-_UpperCAmelCase))


def lowerCamelCase__ (_UpperCAmelCase):
    """Return the SiLU/swish activation x * sigmoid(x), elementwise for arrays."""
    return _UpperCAmelCase * sigmoid(_UpperCAmelCase)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
# Configuration class for the CvT (Convolutional vision Transformer) model; values
# are per-stage lists (three stages).
# NOTE(review): `__init__` repeats the parameter name `a` for every argument — a
# SyntaxError as written — and every `self.<attr> = ...` target was mangled to a
# bare `SCREAMING_SNAKE_CASE`; the right-hand-side names are the only record of
# the intended attributes. Mutable list defaults are also shared across calls once
# restored — consider tuples on repair.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : Optional[int] = logging.get_logger(__name__)

a_ : int = {
    'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class _snake_case ( A__ ):
    _lowercase : Dict = '''cvt'''

    def __init__( self , a=3 , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[64, 192, 384] , a=[1, 3, 6] , a=[1, 2, 10] , a=[4.0, 4.0, 4.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.1] , a=[True, True, True] , a=[False, False, True] , a=["dw_bn", "dw_bn", "dw_bn"] , a=[3, 3, 3] , a=[1, 1, 1] , a=[2, 2, 2] , a=[1, 1, 1] , a=[1, 1, 1] , a=0.02 , a=1E-12 , **a , ) -> List[Any]:
        super().__init__(**a)
        # Store per-stage hyperparameters; see right-hand-side names for intent.
        SCREAMING_SNAKE_CASE = num_channels
        SCREAMING_SNAKE_CASE = patch_sizes
        SCREAMING_SNAKE_CASE = patch_stride
        SCREAMING_SNAKE_CASE = patch_padding
        SCREAMING_SNAKE_CASE = embed_dim
        SCREAMING_SNAKE_CASE = num_heads
        SCREAMING_SNAKE_CASE = depth
        SCREAMING_SNAKE_CASE = mlp_ratio
        SCREAMING_SNAKE_CASE = attention_drop_rate
        SCREAMING_SNAKE_CASE = drop_rate
        SCREAMING_SNAKE_CASE = drop_path_rate
        SCREAMING_SNAKE_CASE = qkv_bias
        SCREAMING_SNAKE_CASE = cls_token
        SCREAMING_SNAKE_CASE = qkv_projection_method
        SCREAMING_SNAKE_CASE = kernel_qkv
        SCREAMING_SNAKE_CASE = padding_kv
        SCREAMING_SNAKE_CASE = stride_kv
        SCREAMING_SNAKE_CASE = padding_q
        SCREAMING_SNAKE_CASE = stride_q
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = layer_norm_eps
73
1
# Processor pairing an auto image processor with an auto tokenizer: text goes to
# the tokenizer, images to the image processor, and the combined encoding carries
# pixel_values alongside token ids.
# NOTE(review): name-mangled — `__init__(self, a, a)` and
# `__call__(self, a=None, a=None, a=None, **a)` repeat the parameter name `a`
# (SyntaxErrors as written); assignment targets for `encoding`/`image_features`
# and the `self.current_processor`-style attribute were lost, so the reads of
# `text`, `images`, `encoding`, `image_features` are unresolved.
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _snake_case ( A__ ):
    _lowercase : Dict = ['''image_processor''', '''tokenizer''']
    _lowercase : Optional[Any] = '''AutoImageProcessor'''
    _lowercase : List[Any] = '''AutoTokenizer'''

    def __init__( self , a , a) -> Any:
        super().__init__(a , a)
        # Presumably sets the "current processor" to the image processor — verify.
        SCREAMING_SNAKE_CASE = self.image_processor

    def __call__( self , a=None , a=None , a=None , **a) -> Optional[int]:
        # Tokenize text and/or preprocess images; at least one must be given.
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            SCREAMING_SNAKE_CASE = self.tokenizer(a , return_tensors=a , **a)
        if images is not None:
            SCREAMING_SNAKE_CASE = self.image_processor(a , return_tensors=a , **a)
        if text is not None and images is not None:
            # Attach the image features to the text encoding before returning.
            SCREAMING_SNAKE_CASE = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**a) , tensor_type=a)

    def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> Tuple:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*a , **a)

    def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> List[Any]:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*a , **a)

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
        # Names of the inputs this processor produces.
        return ["input_ids", "attention_mask", "pixel_values"]
73
# Binary-search number-guessing game (duplicate of the copy at the top of this
# file). Bug fixes: every signature previously repeated the parameter name
# `_UpperCAmelCase` (a SyntaxError) while the bodies read the original names
# (`min_val`, `number_a`, ...), and the bodies called `get_avg`,
# `guess_the_number` and `main`, which no longer existed. Parameters and function
# names are restored to the names the bodies actually use.


def get_bound_value(min_val=10, max_val=1000, option=True):
    """Return ``min_val`` when ``option`` is truthy, else ``max_val``.

    Raises:
        AssertionError: if any argument is not an int.
        ValueError: if ``min_val > max_val``.
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
    return min_val if option else max_val


def get_avg(number_1, number_2):
    """Return the integer midpoint of two numbers (truncated mean)."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower, higher, to_guess):
    """Binary-search for ``to_guess`` in [lower, higher], printing each step.

    Raises:
        AssertionError: if any argument is not an int.
        ValueError: if ``lower > higher`` or ``to_guess`` is out of range.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)')
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value')

    def answer(number) -> str:
        # Oracle: compare a candidate against the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...')
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F'''guess the number : {last_numbers[-1]}''')
    print(F'''details : {last_numbers!s}''')


def main():
    """Read the search bounds and target from stdin, then play the game."""
    lower = int(input('Enter lower value : ').strip())
    higher = int(input('Enter high value : ').strip())
    guess = int(input('Enter value to guess : ').strip())
    guess_the_number(lower, higher, guess)


# Preserve the module's previous (mangled) last public binding for any caller
# that imported it by that name.
lowerCamelCase__ = main


if __name__ == "__main__":
    main()
73
1
"""Project Euler 800: count hybrid integers p^q * q^p <= base^degree.

Bug fixes: the import referenced ``loga`` (not a ``math`` member; restored to
``log2``), the sieve's inner ``range`` used the mangled parameter as both stop
and step (restored to step ``i``), the helper's name collided with the solution
function so ``calculate_prime_numbers`` was unresolved, and the solution's
signature repeated one parameter name (a SyntaxError). The reachable public name
``lowerCamelCase__`` (the solution function, previously the module's final
binding) is preserved.
"""
from math import isqrt, log2


def calculate_prime_numbers(max_number):
    """Return all primes strictly below ``max_number`` via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Strike out multiples of i starting at i*i.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def lowerCamelCase__ (base=80_0800, degree=80_0800):
    """Count pairs of distinct primes p < q with p^q * q^p <= base^degree.

    Works in log2 space: p^q * q^p <= base^degree
    iff q*log2(p) + p*log2(q) <= degree*log2(base).
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer sweep: for each left prime, shrink right until the pair fits;
    # every prime between left and right then pairs validly with primes[left].
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"""{lowerCamelCase__() = }""")
73
"""Tests for the OpenLlama model family (tiny configs, shape/behavior checks)."""

import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    """Builds tiny OpenLlama configs/inputs and runs shape checks on the models."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a tiny config plus random input tensors / optional masks & labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
73
1
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number ``2**p - 1`` is prime (p must be prime).

    Uses the Lucas-Lehmer sequence ``s_0 = 4, s_k = s_{k-1}**2 - 2 (mod 2**p - 1)``;
    the Mersenne number is prime exactly when ``s_{p-2} == 0``.

    Raises:
        ValueError: if ``p`` is less than 2.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    if p == 2:
        # 2**2 - 1 == 3 is prime; the recurrence below needs p > 2.
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
73
from __future__ import annotations

# Every complete placement found by ``solve`` (stored as deep copies).
solution: list[list[list[int]]] = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen placed at ``(row, column)`` is not attacked.

    Checks the row, the column, and the two upward diagonals; rows below
    ``row`` are still empty because queens are placed top-down.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row with backtracking, recording every full solution."""
    if row >= len(board):
        # Store a deep copy: ``board`` keeps being mutated while backtracking,
        # so appending the live object would leave only empty boards recorded.
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # undo and try the next column
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for a queen and '.' for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
73
1
"""Distributed-metrics checks for ``Accelerator.gather_for_metrics``."""

import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (unwrapped model, prepared model, prepared dataloader) for regression data."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build the tokenized MRPC validation dataloader (pad to longest or max_length)."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    """Return {'ddp': ..., 'no': ...} model/dataloader/device triples plus the accelerator."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    """Run the model over the dataloader and gather logits/targets across processes."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    """Check gather_for_metrics returns exactly ``num_samples`` predictions (no dupes)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Check distributed GLUE/MRPC metrics match the single-process baseline."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
73
"""Tests for the StableDiffusion DiffEdit pipeline (mask generation, inversion, inpainting)."""

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack plus forward and inverse schedulers."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
73
1
import numpy as np

import datasets


_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'

_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'

_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Metric computing the Mahalanobis distance of each row of X to a reference distribution."""

    def _info(self):
        # Only `X` is a declared feature; the reference distribution is passed
        # directly as an extra keyword argument of compute().
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'X': datasets.Sequence(datasets.Value('float', id='sequence'), id='X'),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return {"mahalanobis": array} with one distance per row of `X`.

        Raises:
            ValueError: if either input is not 2D, or the reference
                distribution has fewer than two rows (covariance undefined).
        """
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('Expected `X` to be a 2D vector')
        if len(reference_distribution.shape) != 2:
            raise ValueError('Expected `reference_distribution` to be a 2D vector')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension')

        # Center X on the mean of the *reference* distribution, not of X itself.
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
73
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """Configuration class for UniSpeech models.

    Parameter names below are reconstructed from the references inside the
    original __init__ body (the mangled signature had lost them).
    """

    model_type = '''unispeech'''

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Store conv specs as lists so they are JSON-serializable.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv spec lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Overall downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
73
1
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any ``k`` consecutive elements of ``array``.

    Uses a single-pass sliding window: O(len(array)) time, O(1) extra space.
    For ``k == 0`` the empty window sums to 0.

    Raises:
        ValueError: if ``k`` is negative or larger than ``len(array)``.
    """
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    # Seed both the running window sum and the best-so-far with the first window.
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide the window right by one: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for _ in range(100)]
    # Bound k by the array length: the previous randint(0, 110) could exceed
    # len(array) == 100 and make max_sum_in_array raise at random.
    k = randint(0, len(array))
    print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
73
import argparse import collections import json import os import re import string import sys import numpy as np a_ : Optional[Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE) a_ : List[str] = None def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.') parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.') parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.') parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).') parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.') parser.add_argument( '--na-prob-thresh' , '-t' , type=_UpperCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCAmelCase , help='Save precision-recall curves to directory.') parser.add_argument('--verbose' , '-v' , action='store_true') if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE = bool(qa['answers']['text']) return qid_to_has_ans def lowerCamelCase__ (_UpperCAmelCase): def remove_articles(_UpperCAmelCase): return ARTICLES_REGEX.sub(' ' , _UpperCAmelCase) def white_space_fix(_UpperCAmelCase): return " ".join(text.split()) def remove_punc(_UpperCAmelCase): SCREAMING_SNAKE_CASE = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(_UpperCAmelCase): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase)))) def lowerCamelCase__ (_UpperCAmelCase): if not s: return [] return normalize_answer(_UpperCAmelCase).split() def lowerCamelCase__ 
(_UpperCAmelCase , _UpperCAmelCase): return int(normalize_answer(_UpperCAmelCase) == normalize_answer(_UpperCAmelCase)) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase) SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase) SCREAMING_SNAKE_CASE = collections.Counter(_UpperCAmelCase) & collections.Counter(_UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(common.values()) if len(_UpperCAmelCase) == 0 or len(_UpperCAmelCase) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: SCREAMING_SNAKE_CASE = qa['id'] SCREAMING_SNAKE_CASE = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase)] if not gold_answers: # For unanswerable questions, only correct answer is empty string SCREAMING_SNAKE_CASE = [''] if qid not in preds: print(F'''Missing prediction for {qid}''') continue SCREAMING_SNAKE_CASE = preds[qid] # Take max over all gold answers SCREAMING_SNAKE_CASE = max(compute_exact(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers) SCREAMING_SNAKE_CASE = max(compute_fa(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers) return exact_scores, fa_scores def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = {} for qid, s in scores.items(): SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh if pred_na: SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid]) else: SCREAMING_SNAKE_CASE = s return new_scores def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase=None): if not qid_list: SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) return collections.OrderedDict( [ ('exact', 1_00.0 * sum(exact_scores.values()) / total), ('f1', 1_00.0 * sum(fa_scores.values()) / total), ('total', total), ]) else: SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) return collections.OrderedDict( [ ('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total), ('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list) / total), ('total', total), ]) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): for k in new_eval: SCREAMING_SNAKE_CASE = new_eval[k] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post') plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b') plt.xlabel('Recall') plt.ylabel('Precision') plt.xlim([0.0, 1.05]) plt.ylim([0.0, 1.05]) plt.title(_UpperCAmelCase) plt.savefig(_UpperCAmelCase) plt.clf() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None): SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k]) SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = 1.0 SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = [1.0] SCREAMING_SNAKE_CASE = [0.0] SCREAMING_SNAKE_CASE = 0.0 for i, qid in enumerate(_UpperCAmelCase): if qid_to_has_ans[qid]: true_pos += scores[qid] SCREAMING_SNAKE_CASE = true_pos / float(i + 1) SCREAMING_SNAKE_CASE = true_pos / float(_UpperCAmelCase) if i == len(_UpperCAmelCase) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_UpperCAmelCase) recalls.append(_UpperCAmelCase) if out_image: plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return {"ap": 1_00.0 * avg_prec} def 
lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if out_image_dir and not os.path.exists(_UpperCAmelCase): os.makedirs(_UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v) if num_true_pos == 0: return SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , ) SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , ) SCREAMING_SNAKE_CASE = {k: float(_UpperCAmelCase) for k, v in qid_to_has_ans.items()} SCREAMING_SNAKE_CASE = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)' , ) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact') merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1') merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): if not qid_list: return SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list] SCREAMING_SNAKE_CASE = np.ones_like(_UpperCAmelCase) / float(len(_UpperCAmelCase)) plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0)) plt.xlabel('Model probability of no-answer') plt.ylabel('Proportion of dataset') plt.title(F'''Histogram of no-answer probability: {name}''') plt.savefig(os.path.join(_UpperCAmelCase , F'''na_prob_hist_{name}.png''')) plt.clf() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) SCREAMING_SNAKE_CASE = num_no_ans SCREAMING_SNAKE_CASE = cur_score SCREAMING_SNAKE_CASE = 0.0 SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k]) for i, qid in enumerate(_UpperCAmelCase): if qid not in scores: continue if qid_to_has_ans[qid]: SCREAMING_SNAKE_CASE = scores[qid] else: if preds[qid]: SCREAMING_SNAKE_CASE = -1 else: SCREAMING_SNAKE_CASE = 0 cur_score += diff if cur_score > best_score: SCREAMING_SNAKE_CASE = cur_score SCREAMING_SNAKE_CASE = na_probs[qid] return 1_00.0 * best_score / len(_UpperCAmelCase), best_thresh def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = best_exact SCREAMING_SNAKE_CASE = exact_thresh SCREAMING_SNAKE_CASE = best_fa 
SCREAMING_SNAKE_CASE = fa_thresh def lowerCamelCase__ (): with open(OPTS.data_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) SCREAMING_SNAKE_CASE = dataset_json['data'] with open(OPTS.pred_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) if OPTS.na_prob_file: with open(OPTS.na_prob_file) as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) else: SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds} SCREAMING_SNAKE_CASE = make_qid_to_has_ans(_UpperCAmelCase) # maps qid to True/False SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v] SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh) SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh) SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase) if has_ans_qids: SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'HasAns') if no_ans_qids: SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns') if OPTS.na_prob_file: find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir) histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns') histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns') if OPTS.out_file: with open(OPTS.out_file , 'w') as f: json.dump(_UpperCAmelCase , 
_UpperCAmelCase) else: print(json.dumps(_UpperCAmelCase , indent=2)) if __name__ == "__main__": a_ : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
73
1
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download up to `num_class_images` real images matching `class_prompt`.

    Queries the LAION-400M KNN service via clip-retrieval, over-fetching by a
    factor of 1.5 because individual downloads may fail, and writes images plus
    caption/url/path listings under `class_data_dir`.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1)

    os.makedirs(f'''{class_data_dir}/images''', exist_ok=True)
    # A previous run already downloaded enough images: nothing to do.
    if len(list(Path(f'''{class_data_dir}/images''').iterdir())) >= num_class_images:
        return

    # Grow the query size until the service returns enough candidates
    # (capped at 10k results per query).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service',
                indice_name='laion_400m',
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)

    with open(f'''{class_data_dir}/caption.txt''', 'w') as fa, open(f'''{class_data_dir}/urls.txt''', 'w') as fa2, open(
        f'''{class_data_dir}/images.txt''', 'w') as fa3:
        while total < num_class_images:
            # Stop once every candidate has been tried; otherwise, with enough
            # failed downloads, class_images[count] would raise IndexError.
            if count >= len(class_images):
                break
            images = class_images[count]
            count += 1
            try:
                # Timeout so one stuck download cannot hang the whole run.
                img = requests.get(images['url'], timeout=30)
                if img.status_code == 200:
                    # Validate that the payload actually decodes as an image.
                    _ = Image.open(BytesIO(img.content))
                    with open(f'''{class_data_dir}/images/{total}.jpg''', 'wb') as f:
                        f.write(img.content)
                    fa.write(images['caption'] + '\n')
                    fa2.write(images['url'] + '\n')
                    fa3.write(f'''{class_data_dir}/images/{total}.jpg''' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip anything that fails to fetch/decode.
                continue
    return


def parse_args():
    """CLI arguments: prompt, output directory, and image count."""
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
73
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of GLPNImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning (a Warning subclass is required as the warn
        # category) so callers migrate before the alias is removed in v5.
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
73
1
from __future__ import annotations from collections.abc import Sequence from typing import Literal def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = list(_UpperCAmelCase) SCREAMING_SNAKE_CASE = list(_UpperCAmelCase) SCREAMING_SNAKE_CASE = 0 for i in range(len(_UpperCAmelCase)): if lista[i] != lista[i]: count += 1 SCREAMING_SNAKE_CASE = '_' if count > 1: return False else: return "".join(_UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = [] while True: SCREAMING_SNAKE_CASE = ['$'] * len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = [] for i in range(len(_UpperCAmelCase)): for j in range(i + 1 , len(_UpperCAmelCase)): SCREAMING_SNAKE_CASE = compare_string(binary[i] , binary[j]) if k is False: SCREAMING_SNAKE_CASE = '*' SCREAMING_SNAKE_CASE = '*' temp.append('X') for i in range(len(_UpperCAmelCase)): if checka[i] == "$": pi.append(binary[i]) if len(_UpperCAmelCase) == 0: return pi SCREAMING_SNAKE_CASE = list(set(_UpperCAmelCase)) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] for minterm in minterms: SCREAMING_SNAKE_CASE = '' for _ in range(_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(minterm % 2) + string minterm //= 2 temp.append(_UpperCAmelCase) return temp def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = list(_UpperCAmelCase) SCREAMING_SNAKE_CASE = list(_UpperCAmelCase) SCREAMING_SNAKE_CASE = 0 for i in range(len(_UpperCAmelCase)): if lista[i] != lista[i]: count_n += 1 return count_n == count def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [0] * len(_UpperCAmelCase) for i in range(len(chart[0])): SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = -1 for j in range(len(_UpperCAmelCase)): if chart[j][i] == 1: count += 1 SCREAMING_SNAKE_CASE = j if count == 1: SCREAMING_SNAKE_CASE = 1 for i in range(len(_UpperCAmelCase)): if select[i] == 1: for j in range(len(chart[0])): if 
chart[i][j] == 1: for k in range(len(_UpperCAmelCase)): SCREAMING_SNAKE_CASE = 0 temp.append(prime_implicants[i]) while True: SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = 0 for i in range(len(_UpperCAmelCase)): SCREAMING_SNAKE_CASE = chart[i].count(1) if count_n > max_n: SCREAMING_SNAKE_CASE = count_n SCREAMING_SNAKE_CASE = i if max_n == 0: return temp temp.append(prime_implicants[rem]) for i in range(len(chart[0])): if chart[rem][i] == 1: for j in range(len(_UpperCAmelCase)): SCREAMING_SNAKE_CASE = 0 def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [[0 for x in range(len(_UpperCAmelCase))] for x in range(len(_UpperCAmelCase))] for i in range(len(_UpperCAmelCase)): SCREAMING_SNAKE_CASE = prime_implicants[i].count('_') for j in range(len(_UpperCAmelCase)): if is_for_table(prime_implicants[i] , binary[j] , _UpperCAmelCase): SCREAMING_SNAKE_CASE = 1 return chart def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = int(input('Enter the no. of variables\n')) SCREAMING_SNAKE_CASE = [ float(_UpperCAmelCase) for x in input( 'Enter the decimal representation of Minterms \'Spaces Separated\'\n').split() ] SCREAMING_SNAKE_CASE = decimal_to_binary(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = check(_UpperCAmelCase) print('Prime Implicants are:') print(_UpperCAmelCase) SCREAMING_SNAKE_CASE = prime_implicant_chart(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = selection(_UpperCAmelCase , _UpperCAmelCase) print('Essential Prime Implicants are:') print(_UpperCAmelCase) if __name__ == "__main__": import doctest doctest.testmod() main()
73
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class _snake_case(unittest.TestCase, ToolTesterMixin):
    """Tests for the text-classification tool, called locally and remotely,
    with positional and keyword arguments.

    The original block defined all four tests plus setup under one colliding
    method name, so only the last definition survived; distinct unittest
    names restore them.
    """

    def setUp(self):
        # Local tool needs an explicit setup(); the remote variant does not.
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def test_exact_match_arg(self):
        result = self.tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg(self):
        result = self.tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
73
1
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING a_ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(A__ ) class _snake_case ( A__ ): def __init__( self , *a , **a) -> Tuple: super().__init__(*a , **a) requires_backends(self , 'decord') self.check_model_type(a) def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a=None) -> Any: SCREAMING_SNAKE_CASE = {} if frame_sampling_rate is not None: SCREAMING_SNAKE_CASE = frame_sampling_rate if num_frames is not None: SCREAMING_SNAKE_CASE = num_frames SCREAMING_SNAKE_CASE = {} if top_k is not None: SCREAMING_SNAKE_CASE = top_k return preprocess_params, {}, postprocess_params def __call__( self , a , **a) -> Union[str, Any]: return super().__call__(a , **a) def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1) -> List[str]: if num_frames is None: SCREAMING_SNAKE_CASE = self.model.config.num_frames if video.startswith('http://') or video.startswith('https://'): SCREAMING_SNAKE_CASE = BytesIO(requests.get(a).content) SCREAMING_SNAKE_CASE = VideoReader(a) videoreader.seek(0) SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = num_frames * frame_sampling_rate - 1 SCREAMING_SNAKE_CASE = np.linspace(a , a , num=a , dtype=np.intaa) SCREAMING_SNAKE_CASE = videoreader.get_batch(a).asnumpy() SCREAMING_SNAKE_CASE = list(a) SCREAMING_SNAKE_CASE = self.image_processor(a , return_tensors=self.framework) return model_inputs def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict: SCREAMING_SNAKE_CASE = self.model(**a) return model_outputs def SCREAMING_SNAKE_CASE__ ( self , a , a=5) -> Optional[Any]: if top_k > self.model.config.num_labels: SCREAMING_SNAKE_CASE = 
self.model.config.num_labels if self.framework == "pt": SCREAMING_SNAKE_CASE = model_outputs.logits.softmax(-1)[0] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = probs.topk(a) else: raise ValueError(f'''Unsupported framework: {self.framework}''') SCREAMING_SNAKE_CASE = scores.tolist() SCREAMING_SNAKE_CASE = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a , a)]
73
import sys
import turtle


def get_mid(p1, p2):
    """Return the midpoint of the segment p1-p2 (points are (x, y) pairs)."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw a Sierpinski triangle by recursing on the three corner sub-triangles.

    Depth 0 draws just the outline of the current triangle.
    """
    # Draw the outline of this triangle without leaving a trail on the way in.
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    # Recurse into the three corner triangles formed by the edge midpoints.
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            'Correct format for using this script: '
            'python fractals.py <int:depth_for_fractal>'
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('red')

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
73
1
import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope='session') def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = 10 SCREAMING_SNAKE_CASE = datasets.Features( { 'tokens': datasets.Sequence(datasets.Value('string')), 'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])), 'answers': datasets.Sequence( { 'text': datasets.Value('string'), 'answer_start': datasets.Value('int32'), }), 'id': datasets.Value('int64'), }) SCREAMING_SNAKE_CASE = datasets.Dataset.from_dict( { 'tokens': [['foo'] * 5] * n, 'labels': [[1] * 5] * n, 'answers': [{'answer_start': [97], 'text': ['1976']}] * 10, 'id': list(range(_UpperCAmelCase)), } , features=_UpperCAmelCase , ) return dataset @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'file.arrow') dataset.map(cache_file_name=_UpperCAmelCase) return filename # FILE_CONTENT + files a_ : Tuple = '\\n Text data.\n Second line of data.' 
@pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt' SCREAMING_SNAKE_CASE = FILE_CONTENT with open(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase) return filename @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): import bza SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.bz2' SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8') with bza.open(_UpperCAmelCase , 'wb') as f: f.write(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): import gzip SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'file.txt.gz') SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8') with gzip.open(_UpperCAmelCase , 'wb') as f: f.write(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): if datasets.config.LZ4_AVAILABLE: import lza.frame SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.lz4' SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8') with lza.frame.open(_UpperCAmelCase , 'wb') as f: f.write(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): if datasets.config.PY7ZR_AVAILABLE: import pyazr SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.7z' with pyazr.SevenZipFile(_UpperCAmelCase , 'w') as archive: archive.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): import tarfile SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.tar' with tarfile.TarFile(_UpperCAmelCase , 'w') as f: f.add(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): import lzma SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 
'file.txt.xz' SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8') with lzma.open(_UpperCAmelCase , 'wb') as f: f.write(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): import zipfile SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.zst' SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8') with zstd.open(_UpperCAmelCase , 'wb') as f: f.write(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.xml' SCREAMING_SNAKE_CASE = textwrap.dedent( '\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>') with open(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase) return filename a_ : Tuple = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] a_ : Any = [ {'col_1': '4', 'col_2': 
4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] a_ : List[Any] = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } a_ : Dict = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] a_ : List[Any] = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope='session') def lowerCamelCase__ (): return DATA_DICT_OF_LISTS @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = datasets.Dataset.from_dict(_UpperCAmelCase) SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.arrow') dataset.map(cache_file_name=_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite') with contextlib.closing(sqlitea.connect(_UpperCAmelCase)) as con: SCREAMING_SNAKE_CASE = con.cursor() cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)') for item in DATA: cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values())) con.commit() return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.csv') with open(_UpperCAmelCase , 'w' , newline='') as f: SCREAMING_SNAKE_CASE = csv.DictWriter(_UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3']) writer.writeheader() for item in DATA: writer.writerow(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset2.csv') with open(_UpperCAmelCase , 'w' , newline='') as f: SCREAMING_SNAKE_CASE = csv.DictWriter(_UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3']) writer.writeheader() for item in 
DATA: writer.writerow(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): import bza SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2' with open(_UpperCAmelCase , 'rb') as f: SCREAMING_SNAKE_CASE = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(_UpperCAmelCase , 'wb') as f: f.write(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.csv.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.csv.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV'))) f.write(_UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV'))) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset_with_dir.csv.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase))) f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase))) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.parquet') SCREAMING_SNAKE_CASE = pa.schema( { 'col_1': pa.string(), 'col_2': pa.intaa(), 'col_3': pa.floataa(), }) with open(_UpperCAmelCase , 'wb') as f: SCREAMING_SNAKE_CASE = 
pq.ParquetWriter(_UpperCAmelCase , schema=_UpperCAmelCase) SCREAMING_SNAKE_CASE = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_UpperCAmelCase))] for k in DATA[0]} , schema=_UpperCAmelCase) writer.write_table(_UpperCAmelCase) writer.close() return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.json') SCREAMING_SNAKE_CASE = {'data': DATA} with open(_UpperCAmelCase , 'w') as f: json.dump(_UpperCAmelCase , _UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.json') SCREAMING_SNAKE_CASE = {'data': DATA_DICT_OF_LISTS} with open(_UpperCAmelCase , 'w') as f: json.dump(_UpperCAmelCase , _UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl') with open(_UpperCAmelCase , 'w') as f: for item in DATA: f.write(json.dumps(_UpperCAmelCase) + '\n') return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl') with open(_UpperCAmelCase , 'w') as f: for item in DATA: f.write(json.dumps(_UpperCAmelCase) + '\n') return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl') with open(_UpperCAmelCase , 'w') as f: for item in DATA_312: f.write(json.dumps(_UpperCAmelCase) + '\n') return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl') with open(_UpperCAmelCase , 'w') as f: for item in DATA_STR: f.write(json.dumps(_UpperCAmelCase) + '\n') return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , 
_UpperCAmelCase): import gzip SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz') with open(_UpperCAmelCase , 'rb') as orig_file: with gzip.open(_UpperCAmelCase , 'wb') as zipped_file: zipped_file.writelines(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): import gzip SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz') with open(_UpperCAmelCase , 'rb') as orig_file: with gzip.open(_UpperCAmelCase , 'wb') as zipped_file: zipped_file.writelines(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(_UpperCAmelCase))) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase))) f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase))) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar' with 
tarfile.TarFile(_UpperCAmelCase , 'w') as f: f.add(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) f.add(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar' with tarfile.TarFile(_UpperCAmelCase , 'w') as f: f.add(_UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(_UpperCAmelCase))) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = ['0', '1', '2', '3'] SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.txt') with open(_UpperCAmelCase , 'w') as f: for item in data: f.write(item + '\n') return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = ['0', '1', '2', '3'] SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset2.txt') with open(_UpperCAmelCase , 'w') as f: for item in data: f.write(item + '\n') return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = ['0', '1', '2', '3'] SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.abc' with open(_UpperCAmelCase , 'w') as f: for item in data: f.write(item + '\n') return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.text.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip' with 
zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase))) f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase))) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.ext.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.basename('unsupported.ext')) f.write(_UpperCAmelCase , arcname=os.path.basename('unsupported_2.ext')) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third']) SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt') with open(_UpperCAmelCase , 'w' , encoding='utf-8') as f: f.write(_UpperCAmelCase) return path @pytest.fixture(scope='session') def lowerCamelCase__ (): return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg') @pytest.fixture(scope='session') def lowerCamelCase__ (): return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav') @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.img.zip' with zipfile.ZipFile(_UpperCAmelCase , 'w') as f: f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase)) f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase).replace('.jpg' , '2.jpg')) return path @pytest.fixture(scope='session') def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data_dir') (data_dir / "subdir").mkdir() with open(data_dir / 'subdir' / 'train.txt' , 'w') as f: f.write('foo\n' * 10) with open(data_dir / 'subdir' / 'test.txt' , 'w') as f: f.write('bar\n' * 10) # hidden file with 
open(data_dir / 'subdir' / '.test.txt' , 'w') as f: f.write('bar\n' * 10) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / '.subdir' / 'train.txt' , 'w') as f: f.write('foo\n' * 10) with open(data_dir / '.subdir' / 'test.txt' , 'w') as f: f.write('bar\n' * 10) return data_dir
73
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed a_ : Any = 'true' def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16): set_seed(42) SCREAMING_SNAKE_CASE = RegressionModel() SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase) SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase) SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase) model.to(accelerator.device) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase) return model, ddp_model, dataloader def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased') SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation') def tokenize_function(_UpperCAmelCase): SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase) return outputs with accelerator.main_process_first(): SCREAMING_SNAKE_CASE = dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels') def collate_fn(_UpperCAmelCase): if use_longest: return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt') return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt') return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16) def lowerCamelCase__ (_UpperCAmelCase , 
_UpperCAmelCase): SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase) SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches) SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] for batch in dataloader: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], [] for logit, targ in logits_and_targets: logits.append(_UpperCAmelCase) targs.append(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase) return logits, targs def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) assert ( len(_UpperCAmelCase) == num_samples ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}''' def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False): SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc') SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase) # First do 
baseline SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no'] model.to(_UpperCAmelCase) model.eval() for batch in dataloader: batch.to(_UpperCAmelCase) with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels']) SCREAMING_SNAKE_CASE = metric.compute() # Then do distributed SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1) SCREAMING_SNAKE_CASE = batch['labels'] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase) SCREAMING_SNAKE_CASE = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''') test_mrpc(_UpperCAmelCase , _UpperCAmelCase) 
accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**') for split_batches in [True, False]: for dispatch_batches in [True, False]: SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase) if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''') test_torch_metrics(_UpperCAmelCase , 99) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**') SCREAMING_SNAKE_CASE = Accelerator() test_torch_metrics(_UpperCAmelCase , 512) accelerator.state._reset_state() def lowerCamelCase__ (_UpperCAmelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
73
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available a_ : Tuple = {'tokenization_herbert': ['HerbertTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Any = ['HerbertTokenizerFast'] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys a_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available a_ : List[str] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
1
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def lowerCamelCase__ (_UpperCAmelCase = 8): SCREAMING_SNAKE_CASE = ascii_letters + digits + punctuation return "".join(secrets.choice(_UpperCAmelCase) for _ in range(_UpperCAmelCase)) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = i // 3 SCREAMING_SNAKE_CASE = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) SCREAMING_SNAKE_CASE = ( chars_incl + random(_UpperCAmelCase , quotient + remainder) + random(_UpperCAmelCase , _UpperCAmelCase) + random(_UpperCAmelCase , _UpperCAmelCase) ) SCREAMING_SNAKE_CASE = list(_UpperCAmelCase) shuffle(_UpperCAmelCase) return "".join(_UpperCAmelCase) # random is a generalised function for letters, characters and numbers def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): return "".join(secrets.choice(_UpperCAmelCase) for _ in range(_UpperCAmelCase)) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): pass # Put your code here... def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): pass # Put your code here... def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): pass # Put your code here... 
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = 8): if len(_UpperCAmelCase) < min_length: # Your Password must be at least 8 characters long return False SCREAMING_SNAKE_CASE = any(char in ascii_uppercase for char in password) SCREAMING_SNAKE_CASE = any(char in ascii_lowercase for char in password) SCREAMING_SNAKE_CASE = any(char in digits for char in password) SCREAMING_SNAKE_CASE = any(char in punctuation for char in password) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = int(input('Please indicate the max length of your password: ').strip()) SCREAMING_SNAKE_CASE = input( 'Please indicate the characters that must be in your password: ').strip() print('Password generated:' , password_generator(_UpperCAmelCase)) print( 'Alternative Password generated:' , alternative_password_generator(_UpperCAmelCase , _UpperCAmelCase) , ) print('[If you are thinking of using this passsword, You better save it.]') if __name__ == "__main__": main()
73
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path a_ : str = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def lowerCamelCase__ (_UpperCAmelCase=True): if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) ) class _snake_case ( A__ ): _lowercase : Optional[Any] = None _lowercase : Optional[Any] = None def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]: with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , 
dataset=a) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=a , config_name=a , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'), config.DATASET_INFO_FILENAME, ]) SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a) self.assertTrue(os.path.exists(a)) @pytest.mark.integration def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple' SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam SCREAMING_SNAKE_CASE = None builder_instance.download_and_prepare() SCREAMING_SNAKE_CASE = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset() assert ds assert isinstance(_UpperCAmelCase , _UpperCAmelCase) assert "train" in ds assert isinstance(ds['train'] , _UpperCAmelCase) assert next(iter(ds['train']))
73
1
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class _snake_case : def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> List[str]: return None class _snake_case : def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> int: return None class _snake_case ( unittest.TestCase ): _lowercase : Optional[int] = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def SCREAMING_SNAKE_CASE__ ( self) -> Any: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(a , 'tf' , 12 , **a) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(a , 'pt' , 12 , **a) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self) -> int: from transformers import BertModel SCREAMING_SNAKE_CASE = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t') as vocab_file: vocab_file.write('\n'.join(a)) vocab_file.flush() SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name) with TemporaryDirectory() as bert_save_dir: SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(a))) model.save_pretrained(a) self._test_export(a , 'pt' , 12 , a) @require_tf @slow def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(a , 'tf' , 12 , **a) SCREAMING_SNAKE_CASE = quantize(Path(a)) # Ensure the actual quantized model is not bigger than the original one if 
quantized_path.stat().st_size >= Path(a).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model') @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self) -> Dict: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(a , 'pt' , 12 , **a) SCREAMING_SNAKE_CASE = quantize(a) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(a).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model') def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a=None , **a) -> List[Any]: try: # Compute path with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE = Path(a).joinpath('model.onnx') # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(a , a , a , a , a , **a) return path except Exception as e: self.fail(a) @require_torch @require_tokenizers @slow def SCREAMING_SNAKE_CASE__ ( self) -> int: from transformers import BertModel SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random')) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random') self._test_infer_dynamic_axis(a , a , 'pt') @require_tf @require_tokenizers @slow def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: from transformers import TFBertModel SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random')) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random') self._test_infer_dynamic_axis(a , a , 'tf') def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> int: SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(a , a) SCREAMING_SNAKE_CASE = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(a , a) # Assert all variables are present self.assertEqual(len(a) , len(a)) self.assertTrue(all(var_name in shapes 
for var_name in variable_names)) self.assertSequenceEqual(variable_names[:3] , a) self.assertSequenceEqual(variable_names[3:] , a) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'}) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'}) self.assertDictEqual(shapes['output_1'] , {0: 'batch'}) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask', 'token_type_ids'] SCREAMING_SNAKE_CASE = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , a , a) # Should have exactly the same number of args (all are valid) self.assertEqual(len(a) , 3) # Should have exactly the same input names self.assertEqual(set(a) , set(a)) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(a , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask'])) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , a , a) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(a) , 1) self.assertEqual(len(a) , 1) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['input_ids']) self.assertEqual(ordered_input_names[0] , 'input_ids') def SCREAMING_SNAKE_CASE__ ( self) -> int: SCREAMING_SNAKE_CASE = generate_identified_filename(Path('/home/something/my_fake_model.onnx') , '-test') self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix())
73
from __future__ import annotations


def lowerCamelCase__ (_UpperCAmelCase):
    """Return the prime factorization of ``_UpperCAmelCase`` in ascending order.

    Trial division: repeatedly divide out the smallest candidate factor up to
    sqrt(n), so repeated primes appear repeatedly (e.g. 12 -> [2, 2, 3]).
    Returns an empty list for inputs < 2.
    """
    n = _UpperCAmelCase
    candidate = 2  # smallest possible prime factor
    factors: list[int] = []
    while candidate * candidate <= n:
        if n % candidate:
            candidate += 1  # not a divisor; try the next candidate
        else:
            n //= candidate  # peel off one copy of this prime factor
            factors.append(candidate)
    if n > 1:
        # whatever remains after dividing out all factors <= sqrt(n) is prime
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class _snake_case(A__):
    """Configuration class storing the hyper-parameters of a UniSpeech model.

    Every constructor argument is persisted as an instance attribute of the
    same name; token ids are forwarded to the base config's ``__init__``.
    """

    _lowercase = '''unispeech'''

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # one feature-extractor layer per conv_dim entry
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # the three conv hyper-parameter tuples must describe the same number
        # of layers, otherwise the feature extractor cannot be built
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # total downsampling factor of the conv feature extractor
        return functools.reduce(operator.mul, self.conv_stride, 1)
73
import math import os import sys def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = '' try: with open(_UpperCAmelCase , 'rb') as binary_file: SCREAMING_SNAKE_CASE = binary_file.read() for dat in data: SCREAMING_SNAKE_CASE = F'''{dat:08b}''' result += curr_byte return result except OSError: print('File not accessible') sys.exit() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): lexicon.pop(_UpperCAmelCase) SCREAMING_SNAKE_CASE = last_match_id if math.loga(_UpperCAmelCase).is_integer(): for curr_key in lexicon: SCREAMING_SNAKE_CASE = '0' + lexicon[curr_key] SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:] def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = {'0': '0', '1': '1'} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = '', '' SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) for i in range(len(_UpperCAmelCase)): curr_string += data_bits[i] if curr_string not in lexicon: continue SCREAMING_SNAKE_CASE = lexicon[curr_string] result += last_match_id add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) index += 1 SCREAMING_SNAKE_CASE = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": SCREAMING_SNAKE_CASE = lexicon[curr_string] result += last_match_id return result def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = os.path.getsize(_UpperCAmelCase) SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:] SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) return "0" * (length_length - 1) + file_length_binary + compressed def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = 8 try: with open(_UpperCAmelCase , 'wb') as opened_file: SCREAMING_SNAKE_CASE = [ to_write[i : i + byte_length] for i in range(0 , len(_UpperCAmelCase) , _UpperCAmelCase) ] if len(result_byte_array[-1]) % byte_length == 0: result_byte_array.append('10000000') else: result_byte_array[-1] += "1" + "0" * ( byte_length 
- len(result_byte_array[-1]) - 1 ) for elem in result_byte_array: opened_file.write(int(_UpperCAmelCase , 2).to_bytes(1 , byteorder='big')) except OSError: print('File not accessible') sys.exit() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = read_file_binary(_UpperCAmelCase) SCREAMING_SNAKE_CASE = compress_data(_UpperCAmelCase) SCREAMING_SNAKE_CASE = add_file_length(_UpperCAmelCase , _UpperCAmelCase) write_file_binary(_UpperCAmelCase , _UpperCAmelCase) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
73
1
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _snake_case ( A__ ): _lowercase : Any = ['''image_processor''', '''tokenizer'''] _lowercase : Tuple = '''LayoutLMv3ImageProcessor''' _lowercase : int = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''') def __init__( self , a=None , a=None , **a) -> int: SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , a , ) SCREAMING_SNAKE_CASE = kwargs.pop('feature_extractor') SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(a , a) def __call__( self , a , a = None , a = None , a = None , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = None , a = False , a = False , a = False , a = False , a = True , a = None , **a , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.') if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.') # first, apply the image processor SCREAMING_SNAKE_CASE = self.image_processor(images=a , return_tensors=a) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(a , a): SCREAMING_SNAKE_CASE = [text] # add batch dimension (as the image 
processor always adds a batch dimension) SCREAMING_SNAKE_CASE = features['words'] SCREAMING_SNAKE_CASE = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , ) # add pixel values SCREAMING_SNAKE_CASE = features.pop('pixel_values') if return_overflowing_tokens is True: SCREAMING_SNAKE_CASE = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping']) SCREAMING_SNAKE_CASE = images return encoded_inputs def SCREAMING_SNAKE_CASE__ ( self , a , a) -> str: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image SCREAMING_SNAKE_CASE = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx]) if len(a) != len(a): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f''' {len(a)} and {len(a)}''') return images_with_overflow def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> Union[str, Any]: return self.tokenizer.batch_decode(*a , **a) def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> Optional[Any]: return self.tokenizer.decode(*a , **a) @property def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' 
, a , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , ) return self.image_processor
73
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCamelCase__ (_UpperCAmelCase): return 1.0 / (1.0 + np.exp(-_outputs)) def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=_UpperCAmelCase) SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_UpperCAmelCase) class _snake_case ( A__ ): _lowercase : Tuple = '''sigmoid''' _lowercase : List[str] = '''softmax''' _lowercase : Tuple = '''none''' @add_end_docstrings( A__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. 
''' , ) class _snake_case ( A__ ): _lowercase : Optional[Any] = False _lowercase : Tuple = ClassificationFunction.NONE def __init__( self , **a) -> Optional[Any]: super().__init__(**a) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" SCREAMING_SNAKE_CASE = tokenizer_kwargs SCREAMING_SNAKE_CASE = {} if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None: SCREAMING_SNAKE_CASE = self.model.config.return_all_scores if isinstance(a , a) or top_k is None: SCREAMING_SNAKE_CASE = top_k SCREAMING_SNAKE_CASE = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , ) if return_all_scores: SCREAMING_SNAKE_CASE = None else: SCREAMING_SNAKE_CASE = 1 if isinstance(a , a): SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: SCREAMING_SNAKE_CASE = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *a , **a) -> Optional[int]: SCREAMING_SNAKE_CASE = super().__call__(*a , **a) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
SCREAMING_SNAKE_CASE = 'top_k' not in kwargs if isinstance(args[0] , a) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> Dict[str, GenericTensor]: SCREAMING_SNAKE_CASE = self.framework if isinstance(a , a): return self.tokenizer(**a , return_tensors=a , **a) elif isinstance(a , a) and len(a) == 1 and isinstance(inputs[0] , a) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a) elif isinstance(a , a): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.') return self.tokenizer(a , return_tensors=a , **a) def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]: return self.model(**a) def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1 , a=True) -> Any: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. 
# Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None: SCREAMING_SNAKE_CASE = self.model.config.function_to_apply else: SCREAMING_SNAKE_CASE = ClassificationFunction.NONE SCREAMING_SNAKE_CASE = model_outputs['logits'][0] SCREAMING_SNAKE_CASE = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: SCREAMING_SNAKE_CASE = sigmoid(a) elif function_to_apply == ClassificationFunction.SOFTMAX: SCREAMING_SNAKE_CASE = softmax(a) elif function_to_apply == ClassificationFunction.NONE: SCREAMING_SNAKE_CASE = outputs else: raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''') if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} SCREAMING_SNAKE_CASE = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a) ] if not _legacy: dict_scores.sort(key=lambda a: x["score"] , reverse=a) if top_k is not None: SCREAMING_SNAKE_CASE = dict_scores[:top_k] return dict_scores
73
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class _snake_case(A__):
    """Configuration class storing the hyper-parameters of a LiLT model.

    Every constructor argument is persisted as an instance attribute of the
    same name; ``pad_token_id`` is forwarded to the base config's ``__init__``.
    """

    _lowercase = '''lilt'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # LiLT-specific: layout-branch channel shrink and 2D position table size
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
73
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """A graph vertex carrying MST bookkeeping (key, predecessor ``pi``)."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None    # current best edge weight connecting this vertex to the tree
        self.pi = None     # predecessor vertex in the MST
        self.neighbors = []
        self.edges = {}    # {vertex id: edge weight}

    def __lt__(self, other):
        # ordering by key lets min()/heapq pick the cheapest frontier vertex
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Register *vertex* as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge to *vertex*."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Add an undirected weighted edge between 1-indexed vertices *a* and *b*."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    """Prim's MST via linear min-scan; returns [(child, parent), ...] 1-indexed."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)          # cheapest vertex still outside the tree
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root) -> Iterator[tuple]:
    """Prim's MST using a binary heap; yields (child, parent) pairs 1-indexed."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)   # restore heap order after the key decrease

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a_ : Tuple = logging.get_logger(__name__) a_ : Dict = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class _snake_case ( A__ ): _lowercase : List[str] = '''conditional_detr''' _lowercase : Optional[Any] = ['''past_key_values'''] _lowercase : Optional[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , a=True , a=None , a=3 , a=300 , a=6 , a=2048 , a=8 , a=6 , a=2048 , a=8 , a=0.0 , a=0.0 , a=True , a="relu" , a=256 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=1.0 , a=False , a="sine" , a="resnet50" , a=True , a=False , a=2 , a=5 , a=2 , a=1 , a=1 , a=2 , a=5 , a=2 , a=0.25 , **a , ) -> List[str]: if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.') if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.') SCREAMING_SNAKE_CASE = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(a , a): SCREAMING_SNAKE_CASE = backbone_config.get('model_type') SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(a) SCREAMING_SNAKE_CASE = use_timm_backbone SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = decoder_layerdrop SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = backbone SCREAMING_SNAKE_CASE = use_pretrained_backbone SCREAMING_SNAKE_CASE = dilation # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = cls_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = focal_alpha super().__init__(is_encoder_decoder=a , **a) @property def SCREAMING_SNAKE_CASE__ ( self) -> int: return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self) -> int: return self.d_model def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__) 
if self.backbone_config is not None: SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output class _snake_case ( A__ ): _lowercase : int = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ]) @property def SCREAMING_SNAKE_CASE__ ( self) -> float: return 1E-5 @property def SCREAMING_SNAKE_CASE__ ( self) -> int: return 12
73
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Base import structure: submodule name -> public names it exports.
# Optional entries are appended below only when their extras are installed.
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}

# The image processor needs the vision extras (e.g. PIL); register it lazily.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']

# Modeling code needs torch; register it lazily.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_maskaformer import MaskaFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskaformer import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskaFormerForUniversalSegmentation,
            MaskaFormerModel,
            MaskaFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
73
1
def compute_ap(graph):
    """Find the articulation points (cut vertices) of an undirected graph.

    Args:
        graph: adjacency list mapping each vertex (0..n-1) to its neighbours.

    Prints each articulation point and returns them as a sorted list.

    NOTE(review): the original defined this function under one name but called
    it as ``compute_ap`` (and bound the sample graph to a different name than
    the one passed), so the module raised NameError; names are now consistent.
    """
    n = len(graph)
    low = [0] * n          # lowest vertex id reachable from the DFS subtree
    visited = [False] * n
    is_art = [False] * n   # articulation-point flags

    def dfs(root, at, parent, out_edge_count):
        # Count tree edges leaving the DFS root; the root is an articulation
        # point iff it has more than one DFS child.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                # Back edge: may lower the low-link of the current vertex.
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            # Override any flag set on the root by the generic conditions.
            is_art[i] = out_edge_count > 1

    articulation_points = [x for x in range(len(is_art)) if is_art[x]]
    for x in articulation_points:
        print(x)
    return articulation_points


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'edbeeching/decision-transformer-gym-hopper-medium': (
        'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class _snake_case(PretrainedConfig):
    """Configuration class for a Decision Transformer model.

    Stores the hyper-parameters used to instantiate the model; defaults match
    the hopper-medium checkpoint. NOTE(review): the original code had duplicate
    parameter names (a SyntaxError), inherited from the undefined name ``A__``
    (restored to the imported ``PretrainedConfig``), dropped ``self.`` on every
    attribute assignment, and bound three different class attributes to one
    name — all reconstructed per the transformers config convention.
    """

    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        state_dim=17,                 # dimension of the environment state vector
        act_dim=4,                    # dimension of the action vector
        hidden_size=128,
        max_ep_len=4096,              # maximum episode length (timestep embedding size)
        action_tanh=True,             # squash predicted actions with tanh
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,                 # inner FFN dim; None -> 4 * hidden_size downstream
        activation_function='relu',
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ) -> None:
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
73
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Import structure consumed by `_LazyModule`: submodule name -> exported names.
# NOTE(review): the original passed an undefined `_import_structure` to
# `_LazyModule` (NameError at import) and clobbered the dict with the torch
# list; rebuilt per the standard transformers __init__ layout.
_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}

# Modeling code requires torch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

# NOTE(review): the original bound every local and both of these constants to
# one clobbered name, leaving `tokenizer`, `datasets`, `accelerator`, etc.
# undefined at their use sites. Reconstructed per the official accelerate
# `nlp_example.py` script.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build the GLUE/MRPC train and validation dataloaders.

    Args:
        accelerator: the `Accelerator`, used for process coordination and to
            pick padding behaviour for the current distributed/precision setup.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == 'fp8':
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != 'no':
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == 'fp8'),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train and evaluate BERT on MRPC with the given hyper-parameters."""
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric)


def main():
    """Parse CLI flags and launch training with the default hyper-parameters."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
73
1
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending-sorted list by interpolation.

    Returns the index of one occurrence of `item`, or None when absent.
    NOTE(review): the original defined this (and the two helpers below) under
    one reused name while calling them by their real names, raising NameError;
    names are now consistent with the call sites.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        # Probe position estimated from the value distribution.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of `interpolation_search` over [left, right]."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # NOTE(review): argument order below mirrors the reference algorithm;
        # confirm the intended sub-range when the probe escapes the window.
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError unless `collection` is in ascending order."""
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    # NOTE(review): the original only defined the collection inside the
    # `debug == 1` branch, so the search below raised NameError with the
    # default debug value; the sample data is now always defined.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print('Not found')
73
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Import structure consumed by `_LazyModule`: submodule name -> exported names.
# NOTE(review): the original passed an undefined `_import_structure` to
# `_LazyModule` (NameError at import) and clobbered the dict with the
# framework-specific lists; rebuilt per the standard transformers layout.
_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}

# PyTorch implementations; registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_rag'] = [
        'RagModel',
        'RagPreTrainedModel',
        'RagSequenceForGeneration',
        'RagTokenForGeneration',
    ]

# TensorFlow implementations; registered only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_rag'] = [
        'TFRagModel',
        'TFRagPreTrainedModel',
        'TFRagSequenceForGeneration',
        'TFRagTokenForGeneration',
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
1
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


# NOTE(review): the original file defined three classes all named `_snake_case`
# (the first is referenced by name as `FlaubertModelTester` in setUp), gave
# every method the same name, inherited from the undefined name `A__`, and
# used duplicate `a` parameters (a SyntaxError). Names are reconstructed per
# the transformers test-suite conventions — confirm against the mixin hooks.
class FlaubertModelTester(object):
    """Builds small Flaubert configs/inputs and runs per-head shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels sized by the tester hyper-parameters."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'lengths': input_lengths,
            'attention_mask': input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model/pipeline test-suite entry points for Flaubert."""

    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': FlaubertModel,
            'fill-mask': FlaubertWithLMHeadModel,
            'question-answering': FlaubertForQuestionAnsweringSimple,
            'text-classification': FlaubertForSequenceClassification,
            'token-classification': FlaubertForTokenClassification,
            'zero-shot': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu'))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, 'traced_model.pt'))
                loaded = torch.jit.load(os.path.join(tmp, 'traced_model.pt'), map_location=torch_device)
                loaded(inputs_dict['input_ids'].to(torch_device), inputs_dict['attention_mask'].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False): if radian_mode: return [magnitude * cos(_UpperCAmelCase), magnitude * sin(_UpperCAmelCase)] return [magnitude * cos(radians(_UpperCAmelCase)), magnitude * sin(radians(_UpperCAmelCase))] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10**-1): SCREAMING_SNAKE_CASE = cross(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(_UpperCAmelCase) return abs(_UpperCAmelCase) < eps if __name__ == "__main__": # Test to check if it works a_ : int = array( [ polar_force(718.4, 1_80 - 30), polar_force(879.54, 45), polar_force(1_00, -90), ] ) a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg a_ : Dict = array( [ polar_force(30 * 9.81, 15), polar_force(2_15, 1_80 - 45), polar_force(2_64, 90 - 30), ] ) a_ : Any = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg a_ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]]) a_ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
73
1
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class _snake_case(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests run against FlaxAutoencoderKL.

    NOTE(review): the original inherited from the undefined name ``A__``
    (restored to the imported ``FlaxModelTesterMixin``), bound the model class
    to a throwaway attribute instead of ``model_class``, and lost the hook
    method names the mixin dispatches on — confirm against the mixin's API.
    """

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        # Small random image batch plus the PRNG key the Flax model consumes.
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        # Tiny VAE configuration so the common tests stay fast.
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
73
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Optional[int] = logging.get_logger(__name__) a_ : int = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class _snake_case ( A__ ): _lowercase : Dict = '''cvt''' def __init__( self , a=3 , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[64, 192, 384] , a=[1, 3, 6] , a=[1, 2, 10] , a=[4.0, 4.0, 4.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.1] , a=[True, True, True] , a=[False, False, True] , a=["dw_bn", "dw_bn", "dw_bn"] , a=[3, 3, 3] , a=[1, 1, 1] , a=[2, 2, 2] , a=[1, 1, 1] , a=[1, 1, 1] , a=0.02 , a=1E-12 , **a , ) -> List[Any]: super().__init__(**a) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = patch_sizes SCREAMING_SNAKE_CASE = patch_stride SCREAMING_SNAKE_CASE = patch_padding SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = depth SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = attention_drop_rate SCREAMING_SNAKE_CASE = drop_rate SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = qkv_bias SCREAMING_SNAKE_CASE = cls_token SCREAMING_SNAKE_CASE = qkv_projection_method SCREAMING_SNAKE_CASE = kernel_qkv SCREAMING_SNAKE_CASE = padding_kv SCREAMING_SNAKE_CASE = stride_kv SCREAMING_SNAKE_CASE = padding_q SCREAMING_SNAKE_CASE = stride_q SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps
73
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ : Optional[Any] = { 'configuration_mask2former': [ 'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Mask2FormerConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Union[str, Any] = ['Mask2FormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ 'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'Mask2FormerForUniversalSegmentation', 'Mask2FormerModel', 'Mask2FormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
73
def lowerCamelCase__ (_UpperCAmelCase = 10 , _UpperCAmelCase = 1000 , _UpperCAmelCase = True): assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError('Invalid value for min_val or max_val (min_value < max_value)') return min_val if option else max_val def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): return int((number_a + number_a) / 2) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): assert ( isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) ), 'argument values must be type of "int"' if lower > higher: raise ValueError('argument value for lower and higher must be(lower > higher)') if not lower < to_guess < higher: raise ValueError( 'guess value must be within the range of lower and higher value') def answer(_UpperCAmelCase) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print('started...') SCREAMING_SNAKE_CASE = lower SCREAMING_SNAKE_CASE = higher SCREAMING_SNAKE_CASE = [] while True: SCREAMING_SNAKE_CASE = get_avg(_UpperCAmelCase , _UpperCAmelCase) last_numbers.append(_UpperCAmelCase) if answer(_UpperCAmelCase) == "low": SCREAMING_SNAKE_CASE = number elif answer(_UpperCAmelCase) == "high": SCREAMING_SNAKE_CASE = number else: break print(F'''guess the number : {last_numbers[-1]}''') print(F'''details : {last_numbers!s}''') def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = int(input('Enter lower value : ').strip()) SCREAMING_SNAKE_CASE = int(input('Enter high value : ').strip()) SCREAMING_SNAKE_CASE = int(input('Enter value to guess : ').strip()) guess_the_number(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) if __name__ == "__main__": main()
73
1
from queue import PriorityQueue from typing import Any import numpy as np def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): for nxt, d in graph[v]: if nxt in visited_forward: continue SCREAMING_SNAKE_CASE = cst_fwd.get(_UpperCAmelCase , np.inf) SCREAMING_SNAKE_CASE = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt)) SCREAMING_SNAKE_CASE = new_cost_f SCREAMING_SNAKE_CASE = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: SCREAMING_SNAKE_CASE = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = -1 SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = set() SCREAMING_SNAKE_CASE = {source: 0} SCREAMING_SNAKE_CASE = {destination: 0} SCREAMING_SNAKE_CASE = {source: None} SCREAMING_SNAKE_CASE = {destination: None} SCREAMING_SNAKE_CASE = PriorityQueue() SCREAMING_SNAKE_CASE = PriorityQueue() SCREAMING_SNAKE_CASE = np.inf queue_forward.put((0, source)) queue_backward.put((0, destination)) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = queue_forward.get() visited_forward.add(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = queue_backward.get() visited_backward.add(_UpperCAmelCase) SCREAMING_SNAKE_CASE = pass_and_relaxation( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) SCREAMING_SNAKE_CASE = pass_and_relaxation( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if 
shortest_distance != np.inf: SCREAMING_SNAKE_CASE = shortest_distance return shortest_path_distance a_ : Any = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } a_ : Optional[int] = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
73
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class _snake_case : def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = use_token_type_ids SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = type_sequence_label_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = num_labels SCREAMING_SNAKE_CASE = num_choices SCREAMING_SNAKE_CASE = scope def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE 
= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , ) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any: SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a) SCREAMING_SNAKE_CASE = model(a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = OpenLlamaModel(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , ) SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , ) SCREAMING_SNAKE_CASE = model(a , attention_mask=a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int: SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a) model.to(a) model.eval() # first forward pass SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , ) SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size) SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1) SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0] SCREAMING_SNAKE_CASE = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0] # select random slice SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a , a , atol=1E-3)) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( 
SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) = config_and_inputs SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _snake_case ( A__ , A__ , A__ , unittest.TestCase ): _lowercase : List[Any] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) _lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else () _lowercase : List[str] = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) _lowercase : List[str] = False _lowercase : Optional[int] = False def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37) def SCREAMING_SNAKE_CASE__ ( self) -> str: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*a) def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE = 
OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 'single_label_classification' SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 'multi_label_classification' SCREAMING_SNAKE_CASE = input_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a) SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a) model.to(a) model.eval() SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test') def SCREAMING_SNAKE_CASE__ ( self) -> Any: pass @parameterized.expand([('linear',), ('dynamic',)]) def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE = 
ids_tensor([1, 10] , config.vocab_size) SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE = OpenLlamaModel(a) original_model.to(a) original_model.eval() SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0} SCREAMING_SNAKE_CASE = OpenLlamaModel(a) scaled_model.to(a) scaled_model.eval() SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a , a , atol=1E-5)) else: self.assertFalse(torch.allclose(a , a , atol=1E-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(a , a , atol=1E-5))
73
1
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker a_ : Any = 'CompVis/stable-diffusion-v1-1' a_ : Union[str, Any] = 'CompVis/stable-diffusion-v1-2' a_ : str = 'CompVis/stable-diffusion-v1-3' a_ : Any = 'CompVis/stable-diffusion-v1-4' class _snake_case ( A__ ): def __init__( self , a , a , a , a , a , a , a , a = True , ) -> str: super()._init_() SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(a) SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(a) SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(a) SCREAMING_SNAKE_CASE = StableDiffusionPipeline( vae=a , text_encoder=a , tokenizer=a , unet=a , scheduler=a , safety_checker=a , feature_extractor=a , requires_safety_checker=a , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea) @property def SCREAMING_SNAKE_CASE__ ( self) -> Dict[str, Any]: return {k: getattr(self , a) for k in self.config.keys() if not k.startswith('_')} def SCREAMING_SNAKE_CASE__ ( self , a = "auto") -> int: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(a) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: self.enable_attention_slicing(a) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self , a , a = 512 , a = 512 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> Tuple: return self.pipea( prompt=a , 
height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self , a , a = 512 , a = 512 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> Dict: return self.pipea( prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self , a , a = 512 , a = 512 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> List[str]: return self.pipea( prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self , a , a = 512 , a = 512 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> int: return self.pipea( prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self , a , a = 512 , a = 512 , a = 50 , a = 7.5 , a = None , a = 1 , a = 0.0 , a = None , a = None , a = "pil" , a = True , a = None , a = 1 , **a , ) -> Dict: SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu' self.to(a) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and 
`width` must be divisible by 8 but are {height} and {width}.''') # Get first result from Stable Diffusion Checkpoint v1.1 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , ) # Get first result from Stable Diffusion Checkpoint v1.2 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , ) # Get first result from Stable Diffusion Checkpoint v1.3 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , ) # Get first result from Stable Diffusion Checkpoint v1.4 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
73
from __future__ import annotations a_ : str = [] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): for i in range(len(_UpperCAmelCase)): if board[row][i] == 1: return False for i in range(len(_UpperCAmelCase)): if board[i][column] == 1: return False for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , -1 , -1)): if board[i][j] == 1: return False for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , len(_UpperCAmelCase))): if board[i][j] == 1: return False return True def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): if row >= len(_UpperCAmelCase): solution.append(_UpperCAmelCase) printboard(_UpperCAmelCase) print() return True for i in range(len(_UpperCAmelCase)): if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = 1 solve(_UpperCAmelCase , row + 1) SCREAMING_SNAKE_CASE = 0 return False def lowerCamelCase__ (_UpperCAmelCase): for i in range(len(_UpperCAmelCase)): for j in range(len(_UpperCAmelCase)): if board[i][j] == 1: print('Q' , end=' ') else: print('.' , end=' ') print() # n=int(input("The no. of queens")) a_ : Tuple = 8 a_ : int = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
73
1
# Flax Pegasus model tests (tester helper class, input-dict builder, and the
# unittest test class).
#
# NOTE(review): identifiers in this module were machine-mangled — every local
# is assigned to the literal name `SCREAMING_SNAKE_CASE`, every method is named
# `SCREAMING_SNAKE_CASE__` (so only the last definition of each survives), and
# parameter lists were collapsed to repeated `a`, which makes the `def`s below
# duplicate-argument SyntaxErrors.  The original identifiers cannot be safely
# reconstructed from this view; comments below describe apparent intent only.
import unittest

from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): presumably this was `os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"`
    # before mangling — TODO confirm against upstream.
    a_ : Tuple = 'platform'
    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel


@require_flax
class _snake_case:
    # NOTE(review): all three class attrs share the mangled name `_lowercase`;
    # only the last assignment survives at class-creation time.
    _lowercase : int = PegasusConfig
    _lowercase : str = {}
    _lowercase : Union[str, Any] = '''gelu'''

    # Tester constructor: stores model/test hyperparameters (batch size, seq
    # length, vocab, hidden dims, dropout, special token ids).
    # NOTE(review): duplicate `a` parameters — SyntaxError; body references the
    # original parameter names (`parent`, `batch_size`, ...) that the params lost.
    def __init__( self , a , a=13 , a=7 , a=True , a=False , a=99 , a=32 , a=5 , a=4 , a=37 , a=0.1 , a=0.1 , a=20 , a=2 , a=1 , a=0 , ) -> int:
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = batch_size
        SCREAMING_SNAKE_CASE = seq_length
        SCREAMING_SNAKE_CASE = is_training
        SCREAMING_SNAKE_CASE = use_labels
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_dropout_prob
        SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE = max_position_embeddings
        SCREAMING_SNAKE_CASE = eos_token_id
        SCREAMING_SNAKE_CASE = pad_token_id
        SCREAMING_SNAKE_CASE = bos_token_id

    # Builds a (config, inputs_dict) pair: random input ids with an EOS column
    # appended, random decoder ids, and a PegasusConfig from the stored sizes.
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
        SCREAMING_SNAKE_CASE = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
        SCREAMING_SNAKE_CASE = np.concatenate([input_ids, eos_tensor] , axis=1)
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        SCREAMING_SNAKE_CASE = prepare_pegasus_inputs_dict(a , a , a)
        return config, inputs_dict

    # Checks that incremental decoding with a KV cache matches a full forward
    # pass (compares the last-position logits within 1e-3).
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> str:
        SCREAMING_SNAKE_CASE = 20
        SCREAMING_SNAKE_CASE = model_class_name(a)
        SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'])
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , a , a)
        SCREAMING_SNAKE_CASE = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4')
        SCREAMING_SNAKE_CASE = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        SCREAMING_SNAKE_CASE = model.decode( decoder_input_ids[:, :-1] , a , decoder_attention_mask=a , past_key_values=a , decoder_position_ids=a , )
        SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4')
        SCREAMING_SNAKE_CASE = model.decode( decoder_input_ids[:, -1:] , a , decoder_attention_mask=a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=a , )
        SCREAMING_SNAKE_CASE = model.decode(a , a)
        SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''')

    # Same cache-consistency check, but with an explicit decoder attention mask
    # zero-padded out to the maximum decoder length.
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Any:
        SCREAMING_SNAKE_CASE = 20
        SCREAMING_SNAKE_CASE = model_class_name(a)
        SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'])
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        SCREAMING_SNAKE_CASE = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , )
        SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , a , a)
        SCREAMING_SNAKE_CASE = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        SCREAMING_SNAKE_CASE = model.decode( decoder_input_ids[:, :-1] , a , decoder_attention_mask=a , past_key_values=a , decoder_position_ids=a , )
        SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4')
        SCREAMING_SNAKE_CASE = model.decode( decoder_input_ids[:, -1:] , a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=a , decoder_position_ids=a , )
        SCREAMING_SNAKE_CASE = model.decode(a , a , decoder_attention_mask=a)
        SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''')


# Builds the model input dict, deriving attention masks from pad tokens when
# none are supplied.  NOTE(review): five parameters all named `_UpperCAmelCase`
# — SyntaxError; body references the lost names (`config`, `input_ids`,
# `decoder_input_ids`, `attention_mask`, `decoder_attention_mask`).
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
    if attention_mask is None:
        SCREAMING_SNAKE_CASE = np.not_equal(_UpperCAmelCase , config.pad_token_id).astype(np.inta)
    if decoder_attention_mask is None:
        SCREAMING_SNAKE_CASE = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id).astype(np.inta), ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class _snake_case ( A__ , unittest.TestCase ):
    # NOTE(review): base class `A__` is an unresolved mangled name (presumably
    # FlaxModelTesterMixin — TODO confirm); `_lowercase` attrs again collide.
    _lowercase : List[Any] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () )
    _lowercase : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    _lowercase : str = True
    _lowercase : List[str] = False
    _lowercase : Any = False
    _lowercase : Dict = False

    # setUp: instantiate the model tester and the common config tester.
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        SCREAMING_SNAKE_CASE = FlaxPegasusModelTester(self)
        SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a)

    # Runs the shared configuration sanity tests.
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        self.config_tester.run_common_tests()

    # KV-cache forward consistency, for every registered model class.
    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(a , a , a)

    # KV-cache forward consistency with an explicit attention mask.
    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(a , a , a)

    # Encoder outputs must have identical shapes with and without jax.jit.
    def SCREAMING_SNAKE_CASE__ ( self) -> Any:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                SCREAMING_SNAKE_CASE = self._prepare_for_class(a , a)
                SCREAMING_SNAKE_CASE = model_class(a)

                @jax.jit
                def encode_jitted(a , a=None , **a):
                    return model.encode(input_ids=a , attention_mask=a)

                with self.subTest('JIT Enabled'):
                    SCREAMING_SNAKE_CASE = encode_jitted(**a).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE = encode_jitted(**a).to_tuple()
                self.assertEqual(len(a) , len(a))
                for jitted_output, output in zip(a , a):
                    self.assertEqual(jitted_output.shape , output.shape)

    # Decoder outputs must have identical shapes with and without jax.jit.
    def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                SCREAMING_SNAKE_CASE = model_class(a)
                SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'])
                SCREAMING_SNAKE_CASE = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, }

                @jax.jit
                def decode_jitted(a , a , a):
                    return model.decode( decoder_input_ids=a , decoder_attention_mask=a , encoder_outputs=a , )

                with self.subTest('JIT Enabled'):
                    SCREAMING_SNAKE_CASE = decode_jitted(**a).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE = decode_jitted(**a).to_tuple()
                self.assertEqual(len(a) , len(a))
                for jitted_output, output in zip(a , a):
                    self.assertEqual(jitted_output.shape , output.shape)

    # Slow: every model class must load pretrained weights from the PyTorch
    # checkpoint and run a forward pass.
    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('google/pegasus-large' , from_pt=a)
            SCREAMING_SNAKE_CASE = np.ones((1, 1))
            SCREAMING_SNAKE_CASE = model(a)
            self.assertIsNotNone(a)

    # Slow: end-to-end summarization regression test against pegasus-xsum.
    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Any:
        SCREAMING_SNAKE_CASE = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')
        SCREAMING_SNAKE_CASE = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
        SCREAMING_SNAKE_CASE = [
            ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
            ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
        ]
        SCREAMING_SNAKE_CASE = [
            'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
            'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
        ]
        SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='np' , truncation=a , max_length=512 , padding=a)
        SCREAMING_SNAKE_CASE = model.generate(**a , num_beams=2).sequences
        SCREAMING_SNAKE_CASE = tokenizer.batch_decode(a , skip_special_tokens=a)
        assert tgt_text == decoded
73
# StableDiffusionDiffEdit pipeline tests: a fast test class with tiny dummy
# components, and a slow GPU regression class using real checkpoints.
#
# NOTE(review): identifiers in this module were machine-mangled — locals are
# all assigned to `SCREAMING_SNAKE_CASE`, methods are all named
# `SCREAMING_SNAKE_CASE__` (only the last definition of each name survives),
# several method parameter lists were collapsed to repeated `a` (duplicate
# argument names — SyntaxErrors), and many call-site arguments were replaced
# by the bare name `a`.  Original identifiers cannot be safely reconstructed
# from this view; comments describe apparent intent only.
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class _snake_case ( A__ , A__ , unittest.TestCase ):
    # NOTE(review): both bases are the unresolved mangled name `A__`
    # (presumably the PipelineLatentTesterMixin/PipelineTesterMixin pair —
    # TODO confirm); `_lowercase` attrs collide, only the last survives.
    _lowercase : List[Any] = StableDiffusionDiffEditPipeline
    _lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    _lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    _lowercase : List[str] = frozenset( [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _lowercase : List[str] = frozenset([] )

    # Builds the tiny dummy pipeline components (UNet, DDIM + inverse DDIM
    # schedulers, VAE, CLIP text encoder/tokenizer) used by the fast tests.
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
        SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
        SCREAMING_SNAKE_CASE = DDIMInverseScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        SCREAMING_SNAKE_CASE = CLIPTextModel(a)
        SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        SCREAMING_SNAKE_CASE = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, }
        return components

    # Dummy inputs for the main pipeline call (mask + image latents).
    def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
        SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) , rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a)).to(a)
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', }
        return inputs

    # Dummy inputs for `generate_mask` (source/target prompts over an image).
    def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
        SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', }
        return inputs

    # Dummy inputs for `invert` (DDIM inversion of an image to latents).
    def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]:
        SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', }
        return inputs

    # Saving/loading the pipeline with all optional components set to None must
    # keep them None and reproduce the same output within 1e-4.
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        if not hasattr(self.pipeline_class , '_optional_components'):
            return
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(a , a , a)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
        SCREAMING_SNAKE_CASE = pipe(**a)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a)
            SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a)
            pipe_loaded.to(a)
            pipe_loaded.set_progress_bar_config(disable=a)
        for optional_component in pipe._optional_components:
            self.assertTrue( getattr(a , a) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
        SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
        SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0]
        SCREAMING_SNAKE_CASE = np.abs(output - output_loaded).max()
        self.assertLess(a , 1E-4)

    # `generate_mask` shape + fixed-slice regression check on CPU.
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.generate_mask(**a)
        SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16))
        SCREAMING_SNAKE_CASE = np.array([0] * 9)
        SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a , 1E-3)
        self.assertEqual(mask[0, -3, -4] , 0)

    # `invert` (DDIM) shape + fixed-slice regression check on CPU.
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.invert(**a).images
        SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3))
        SCREAMING_SNAKE_CASE = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a , 1E-3)

    # Batched vs single inference must agree within a looser 5e-3 tolerance.
    def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
        super().test_inference_batch_single_identical(expected_max_diff=5E-3)

    # Same inversion regression, but with the DPM-Solver++ scheduler pair.
    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
        SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a)
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.invert(**a).images
        SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3))
        SCREAMING_SNAKE_CASE = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a , 1E-3)


@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
    # GPU memory cleanup after each test.
    def SCREAMING_SNAKE_CASE__ ( self) -> Any:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Class-level fixture: download and resize the shared input image once.
    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls) -> List[Any]:
        SCREAMING_SNAKE_CASE = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
        SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768))
        SCREAMING_SNAKE_CASE = raw_image

    # Full DiffEdit round trip (mask -> invert -> edit) with DDIM schedulers,
    # compared against a reference output image.
    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
        SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config)
        SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = 'a bowl of fruit'
        SCREAMING_SNAKE_CASE = 'a bowl of pears'
        SCREAMING_SNAKE_CASE = pipe.generate_mask( image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
        SCREAMING_SNAKE_CASE = pipe.invert( prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a).latents
        SCREAMING_SNAKE_CASE = pipe( prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
        SCREAMING_SNAKE_CASE = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png').resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5E-1

    # Same round trip with the DPM-Solver++ scheduler pair and 25 steps.
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = 'a bowl of fruit'
        SCREAMING_SNAKE_CASE = 'a bowl of pears'
        SCREAMING_SNAKE_CASE = pipe.generate_mask( image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
        SCREAMING_SNAKE_CASE = pipe.invert( prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents
        SCREAMING_SNAKE_CASE = pipe( prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
        SCREAMING_SNAKE_CASE = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png').resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5E-1
73
1
# Value-guided RL planning pipeline: samples trajectories with a diffusion
# UNet while nudging each denoising step along the gradient of a learned
# value function, then returns the highest-value action.
#
# NOTE(review): identifiers were machine-mangled — locals are all assigned to
# `SCREAMING_SNAKE_CASE`, most methods are named `SCREAMING_SNAKE_CASE__`
# (only the last definition of each name survives), and `__init__`'s four
# parameters were all collapsed to `a` (duplicate argument names — a
# SyntaxError).  Bodies reference the lost original names (`value_function`,
# `unet`, `scheduler`, `env`, `x_in`, `key`, `cond`, ...).
import numpy as np
import torch
import tqdm

from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class _snake_case ( A__ ):
    # NOTE(review): base `A__` is an unresolved mangled name (presumably
    # DiffusionPipeline — TODO confirm).

    # Stores the components and precomputes per-key mean/std of the env
    # dataset for (de)normalization, plus state/action dimensionalities.
    def __init__( self , a , a , a , a , ) -> Tuple:
        super().__init__()
        SCREAMING_SNAKE_CASE = value_function
        SCREAMING_SNAKE_CASE = unet
        SCREAMING_SNAKE_CASE = scheduler
        SCREAMING_SNAKE_CASE = env
        SCREAMING_SNAKE_CASE = env.get_dataset()
        SCREAMING_SNAKE_CASE = {}
        for key in self.data.keys():
            try:
                SCREAMING_SNAKE_CASE = self.data[key].mean()
            # Bare except deliberately skips dataset entries without .mean();
            # NOTE(review): this also hides unrelated errors.
            except:  # noqa: E722
                pass
        SCREAMING_SNAKE_CASE = {}
        for key in self.data.keys():
            try:
                SCREAMING_SNAKE_CASE = self.data[key].std()
            except:  # noqa: E722
                pass
        SCREAMING_SNAKE_CASE = env.observation_space.shape[0]
        SCREAMING_SNAKE_CASE = env.action_space.shape[0]

    # Normalize a value with the stored per-key mean/std.
    def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Tuple:
        return (x_in - self.means[key]) / self.stds[key]

    # Inverse of the normalization above.
    def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[str]:
        return x_in * self.stds[key] + self.means[key]

    # Recursively move dicts/tensors/arrays onto the UNet's device as tensors.
    def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
        if type(a) is dict:
            return {k: self.to_torch(a) for k, v in x_in.items()}
        elif torch.is_tensor(a):
            return x_in.to(self.unet.device)
        return torch.tensor(a , device=self.unet.device)

    # Re-impose conditioning values (e.g. the fixed initial state) onto the
    # trajectory tensor after each denoising step.
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> List[Any]:
        for key, val in cond.items():
            SCREAMING_SNAKE_CASE = val.clone()
        return x_in

    # Runs the reverse diffusion loop; each timestep takes gradient steps on x
    # toward higher predicted value before the scheduler's denoising update.
    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> str:
        SCREAMING_SNAKE_CASE = x.shape[0]
        SCREAMING_SNAKE_CASE = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            SCREAMING_SNAKE_CASE = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long)
            for _ in range(a):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    SCREAMING_SNAKE_CASE = self.value_function(x.permute(0 , 2 , 1) , a).sample
                    SCREAMING_SNAKE_CASE = torch.autograd.grad([y.sum()] , [x])[0]
                    SCREAMING_SNAKE_CASE = self.scheduler._get_variance(a)
                    SCREAMING_SNAKE_CASE = torch.exp(0.5 * posterior_variance)
                    SCREAMING_SNAKE_CASE = model_std * grad
                SCREAMING_SNAKE_CASE = 0
                SCREAMING_SNAKE_CASE = x.detach()
                SCREAMING_SNAKE_CASE = x + scale * grad
                SCREAMING_SNAKE_CASE = self.reset_xa(a , a , self.action_dim)
            SCREAMING_SNAKE_CASE = self.unet(x.permute(0 , 2 , 1) , a).sample.permute(0 , 2 , 1)
            # TODO: verify deprecation of this kwarg
            SCREAMING_SNAKE_CASE = self.scheduler.step(a , a , a , predict_epsilon=a)['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            SCREAMING_SNAKE_CASE = self.reset_xa(a , a , self.action_dim)
            SCREAMING_SNAKE_CASE = self.to_torch(a)
        return x, y

    # Entry point: plan a batch of trajectories from the current observation
    # and return the denormalized actions, best-valued trajectory first.
    def __call__( self , a , a=64 , a=32 , a=2 , a=0.1) -> Optional[Any]:
        # normalize the observations and create  batch dimension
        SCREAMING_SNAKE_CASE = self.normalize(a , 'observations')
        SCREAMING_SNAKE_CASE = obs[None].repeat(a , axis=0)
        SCREAMING_SNAKE_CASE = {0: self.to_torch(a)}
        SCREAMING_SNAKE_CASE = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        SCREAMING_SNAKE_CASE = randn_tensor(a , device=self.unet.device)
        SCREAMING_SNAKE_CASE = self.reset_xa(a , a , self.action_dim)
        SCREAMING_SNAKE_CASE = self.to_torch(a)
        # run the diffusion process
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.run_diffusion(a , a , a , a)
        # sort output trajectories by value
        SCREAMING_SNAKE_CASE = y.argsort(0 , descending=a).squeeze()
        SCREAMING_SNAKE_CASE = x[sorted_idx]
        SCREAMING_SNAKE_CASE = sorted_values[:, :, : self.action_dim]
        SCREAMING_SNAKE_CASE = actions.detach().cpu().numpy()
        SCREAMING_SNAKE_CASE = self.de_normalize(a , key='actions')
        # select the action with the highest value
        if y is not None:
            SCREAMING_SNAKE_CASE = 0
        else:
            # if we didn't run value guiding, select a random action
            SCREAMING_SNAKE_CASE = np.random.randint(0 , a)
        SCREAMING_SNAKE_CASE = denorm_actions[selected_index, 0]
        return denorm_actions
73
# UniSpeech model configuration.
#
# NOTE(review): identifiers were machine-mangled — the class/attribute names
# were replaced, every assignment target is the literal `SCREAMING_SNAKE_CASE`,
# and the `__init__` parameter list was collapsed to repeated `a` (duplicate
# argument names — a SyntaxError).  The assignment right-hand sides preserve
# the original parameter names, which documents the intended signature.
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : List[str] = logging.get_logger(__name__)

a_ : Any = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class _snake_case ( A__ ):
    # NOTE(review): base `A__` is an unresolved mangled name (presumably
    # PretrainedConfig — TODO confirm against the import above).
    _lowercase : Optional[int] = '''unispeech'''

    # Stores all UniSpeech hyperparameters: feature-extractor conv stack,
    # transformer sizes/dropouts, SpecAugment masking, codevector quantization,
    # and CTC-loss settings.  Validates that the conv dim/stride/kernel lists
    # all have the same length.
    def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="mean" , a=False , a=False , a=256 , a=80 , a=0 , a=1 , a=2 , a=0.5 , **a , ) -> Optional[int]:
        super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
        # Transformer / feature-extractor architecture.
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = feat_extract_norm
        SCREAMING_SNAKE_CASE = feat_extract_activation
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = conv_bias
        SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
        SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
        SCREAMING_SNAKE_CASE = len(self.conv_dim)
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = hidden_dropout
        SCREAMING_SNAKE_CASE = attention_dropout
        SCREAMING_SNAKE_CASE = activation_dropout
        SCREAMING_SNAKE_CASE = feat_proj_dropout
        SCREAMING_SNAKE_CASE = final_dropout
        SCREAMING_SNAKE_CASE = layerdrop
        SCREAMING_SNAKE_CASE = layer_norm_eps
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = num_ctc_classes
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = do_stable_layer_norm
        SCREAMING_SNAKE_CASE = use_weighted_layer_sum
        SCREAMING_SNAKE_CASE = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        SCREAMING_SNAKE_CASE = apply_spec_augment
        SCREAMING_SNAKE_CASE = mask_time_prob
        SCREAMING_SNAKE_CASE = mask_time_length
        SCREAMING_SNAKE_CASE = mask_time_min_masks
        SCREAMING_SNAKE_CASE = mask_feature_prob
        SCREAMING_SNAKE_CASE = mask_feature_length
        SCREAMING_SNAKE_CASE = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        SCREAMING_SNAKE_CASE = num_codevectors_per_group
        SCREAMING_SNAKE_CASE = num_codevector_groups
        SCREAMING_SNAKE_CASE = contrastive_logits_temperature
        SCREAMING_SNAKE_CASE = feat_quantizer_dropout
        SCREAMING_SNAKE_CASE = num_negatives
        SCREAMING_SNAKE_CASE = codevector_dim
        SCREAMING_SNAKE_CASE = proj_codevector_dim
        SCREAMING_SNAKE_CASE = diversity_loss_weight
        # ctc loss
        SCREAMING_SNAKE_CASE = ctc_loss_reduction
        SCREAMING_SNAKE_CASE = ctc_zero_infinity
        # pretraining loss
        SCREAMING_SNAKE_CASE = replace_prob

    # Overall downsampling factor of the conv feature extractor
    # (product of all conv strides).
    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        return functools.reduce(operator.mul , self.conv_stride , 1)
73
1
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def lowerCamelCase__ (repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the download URL of a file in a dataset repo on the Hugging Face Hub.

    Fixes the mangled signature whose three parameters were all named
    `_UpperCAmelCase` (duplicate argument names are a SyntaxError); the
    restored names follow the `hfh.hf_hub_url` call below.

    Args:
        repo_id: Dataset repository id on the Hub (e.g. ``"user/dataset"``).
        path: File path inside the repository.
        revision: Optional git revision (branch, tag or commit hash);
            ``None`` means the repo's default branch.

    Returns:
        The fully resolved URL for the file.
    """
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
73
"""Official evaluation script for SQuAD version 2.0 (exact-match / F1 metrics,
no-answer thresholding, optional precision-recall plots).

NOTE(review): mechanical obfuscation collapsed every distinct local name into
`SCREAMING_SNAKE_CASE`, every parameter into `_UpperCAmelCase`, and every
function name into `lowerCamelCase__`.  As a result later definitions shadow
earlier ones, several defs have duplicate parameter names (a SyntaxError), and
bodies reference names (`parser`, `dataset`, `qid_to_has_ans`, `OPTS`, `plt`,
...) that the assignments no longer bind.  The script is therefore NOT
runnable as-is; the comments below record the evident intent of each function
so it can be reconstructed.  Code tokens are reproduced unchanged.
"""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

# Originally ARTICLES_REGEX — strips English articles during normalization.
a_ : Optional[Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
# Originally OPTS — filled in by the argument parser in the __main__ block.
a_ : List[str] = None


# parse_args: build the CLI for data/prediction/na-prob files and output paths.
def lowerCamelCase__ ():
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=_UpperCAmelCase, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=_UpperCAmelCase,
        help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


# make_qid_to_has_ans: map each question id to whether it has a gold answer.
def lowerCamelCase__ (_UpperCAmelCase):
    SCREAMING_SNAKE_CASE = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                SCREAMING_SNAKE_CASE = bool(qa['answers']['text'])
    return qid_to_has_ans


# normalize_answer: lowercase, strip punctuation/articles, collapse whitespace.
def lowerCamelCase__ (_UpperCAmelCase):
    def remove_articles(_UpperCAmelCase):
        return ARTICLES_REGEX.sub(' ', _UpperCAmelCase)

    def white_space_fix(_UpperCAmelCase):
        return " ".join(text.split())

    def remove_punc(_UpperCAmelCase):
        SCREAMING_SNAKE_CASE = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(_UpperCAmelCase):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase))))


# get_tokens: normalized whitespace tokens; empty input yields [].
def lowerCamelCase__ (_UpperCAmelCase):
    if not s:
        return []
    return normalize_answer(_UpperCAmelCase).split()


# compute_exact: 1 if normalized gold == normalized prediction, else 0.
# NOTE(review): duplicate parameter names below are a SyntaxError.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase):
    return int(normalize_answer(_UpperCAmelCase) == normalize_answer(_UpperCAmelCase))


# compute_f1: token-level F1 between gold and predicted answers.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase):
    SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = collections.Counter(_UpperCAmelCase) & collections.Counter(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = sum(common.values())
    if len(_UpperCAmelCase) == 0 or len(_UpperCAmelCase) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
    return fa


# get_raw_scores: per-question exact/F1 scores (max over all gold answers).
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase):
    SCREAMING_SNAKE_CASE = {}
    SCREAMING_SNAKE_CASE = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                SCREAMING_SNAKE_CASE = qa['id']
                SCREAMING_SNAKE_CASE = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    SCREAMING_SNAKE_CASE = ['']
                if qid not in preds:
                    print(F'''Missing prediction for {qid}''')
                    continue
                SCREAMING_SNAKE_CASE = preds[qid]
                # Take max over all gold answers
                SCREAMING_SNAKE_CASE = max(compute_exact(_UpperCAmelCase, _UpperCAmelCase) for a in gold_answers)
                SCREAMING_SNAKE_CASE = max(compute_fa(_UpperCAmelCase, _UpperCAmelCase) for a in gold_answers)
    return exact_scores, fa_scores


# apply_no_ans_threshold: replace scores with the no-answer outcome when the
# model's no-answer probability exceeds the threshold.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase):
    SCREAMING_SNAKE_CASE = {}
    for qid, s in scores.items():
        SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh
        if pred_na:
            SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid])
        else:
            SCREAMING_SNAKE_CASE = s
    return new_scores


# make_eval_dict: aggregate exact/F1 averages, optionally over a qid subset.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None):
    if not qid_list:
        SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
        return collections.OrderedDict(
            [
                ('exact', 1_00.0 * sum(exact_scores.values()) / total),
                ('f1', 1_00.0 * sum(fa_scores.values()) / total),
                ('total', total),
            ])
    else:
        SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
        return collections.OrderedDict(
            [
                ('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list) / total),
                ('total', total),
            ])


# merge_eval: copy new_eval entries into the main evaluation dict under a prefix.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase):
    for k in new_eval:
        SCREAMING_SNAKE_CASE = new_eval[k]


# plot_pr_curve: save a single precision-recall step plot (needs matplotlib,
# imported as plt only in the __main__ block).
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase):
    plt.step(_UpperCAmelCase, _UpperCAmelCase, color='b', alpha=0.2, where='post')
    plt.fill_between(_UpperCAmelCase, _UpperCAmelCase, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(_UpperCAmelCase)
    plt.savefig(_UpperCAmelCase)
    plt.clf()


# make_precision_recall_eval: sweep the no-answer threshold (qids sorted by
# na_prob) and compute average precision; optionally plot the curve.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None):
    SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase, key=lambda _UpperCAmelCase: na_probs[k])
    SCREAMING_SNAKE_CASE = 0.0
    SCREAMING_SNAKE_CASE = 1.0
    SCREAMING_SNAKE_CASE = 0.0
    SCREAMING_SNAKE_CASE = [1.0]
    SCREAMING_SNAKE_CASE = [0.0]
    SCREAMING_SNAKE_CASE = 0.0
    for i, qid in enumerate(_UpperCAmelCase):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        SCREAMING_SNAKE_CASE = true_pos / float(i + 1)
        SCREAMING_SNAKE_CASE = true_pos / float(_UpperCAmelCase)
        if i == len(_UpperCAmelCase) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(_UpperCAmelCase)
            recalls.append(_UpperCAmelCase)
    if out_image:
        plot_pr_curve(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
    return {"ap": 1_00.0 * avg_prec}


# run_precision_recall_analysis: PR curves for exact, F1 and the oracle
# (HasAns-vs-NoAns) tasks; merges the AP numbers into the main eval dict.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase):
    if out_image_dir and not os.path.exists(_UpperCAmelCase):
        os.makedirs(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    SCREAMING_SNAKE_CASE = make_precision_recall_eval(
        _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase,
        out_image=os.path.join(_UpperCAmelCase, 'pr_exact.png'),
        title='Precision-Recall curve for Exact Match score', )
    SCREAMING_SNAKE_CASE = make_precision_recall_eval(
        _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase,
        out_image=os.path.join(_UpperCAmelCase, 'pr_f1.png'),
        title='Precision-Recall curve for F1 score', )
    SCREAMING_SNAKE_CASE = {k: float(_UpperCAmelCase) for k, v in qid_to_has_ans.items()}
    SCREAMING_SNAKE_CASE = make_precision_recall_eval(
        _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase,
        out_image=os.path.join(_UpperCAmelCase, 'pr_oracle.png'),
        title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)', )
    merge_eval(_UpperCAmelCase, _UpperCAmelCase, 'pr_exact')
    merge_eval(_UpperCAmelCase, _UpperCAmelCase, 'pr_f1')
    merge_eval(_UpperCAmelCase, _UpperCAmelCase, 'pr_oracle')


# histogram_na_prob: save a histogram of no-answer probabilities for a qid set.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase):
    if not qid_list:
        return
    SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list]
    SCREAMING_SNAKE_CASE = np.ones_like(_UpperCAmelCase) / float(len(_UpperCAmelCase))
    plt.hist(_UpperCAmelCase, weights=_UpperCAmelCase, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(F'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(_UpperCAmelCase, F'''na_prob_hist_{name}.png'''))
    plt.clf()


# find_best_thresh: sweep thresholds (qids sorted by na_prob) and return the
# best achievable score and the threshold that achieves it.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase):
    SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    SCREAMING_SNAKE_CASE = num_no_ans
    SCREAMING_SNAKE_CASE = cur_score
    SCREAMING_SNAKE_CASE = 0.0
    SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase, key=lambda _UpperCAmelCase: na_probs[k])
    for i, qid in enumerate(_UpperCAmelCase):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            SCREAMING_SNAKE_CASE = scores[qid]
        else:
            if preds[qid]:
                SCREAMING_SNAKE_CASE = -1
            else:
                SCREAMING_SNAKE_CASE = 0
        cur_score += diff
        if cur_score > best_score:
            SCREAMING_SNAKE_CASE = cur_score
            SCREAMING_SNAKE_CASE = na_probs[qid]
    return 1_00.0 * best_score / len(_UpperCAmelCase), best_thresh


# find_all_best_thresh: record best exact/F1 scores and their thresholds in
# the main evaluation dict.
def lowerCamelCase__ (_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase):
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
    SCREAMING_SNAKE_CASE = best_exact
    SCREAMING_SNAKE_CASE = exact_thresh
    SCREAMING_SNAKE_CASE = best_fa
    SCREAMING_SNAKE_CASE = fa_thresh


# main: load data/predictions, compute thresholded metrics, split by
# HasAns/NoAns, optionally run PR analysis, then dump or print the results.
def lowerCamelCase__ ():
    with open(OPTS.data_file) as f:
        SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
        SCREAMING_SNAKE_CASE = dataset_json['data']
    with open(OPTS.pred_file) as f:
        SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
    else:
        SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds}
    SCREAMING_SNAKE_CASE = make_qid_to_has_ans(_UpperCAmelCase)  # maps qid to True/False
    SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v]
    SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v]
    SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_raw_scores(_UpperCAmelCase, _UpperCAmelCase)
    SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, OPTS.na_prob_thresh)
    SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, OPTS.na_prob_thresh)
    SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase, _UpperCAmelCase)
    if has_ans_qids:
        SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase, _UpperCAmelCase, qid_list=_UpperCAmelCase)
        merge_eval(_UpperCAmelCase, _UpperCAmelCase, 'HasAns')
    if no_ans_qids:
        SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase, _UpperCAmelCase, qid_list=_UpperCAmelCase)
        merge_eval(_UpperCAmelCase, _UpperCAmelCase, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, OPTS.out_image_dir)
        histogram_na_prob(_UpperCAmelCase, _UpperCAmelCase, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(_UpperCAmelCase, _UpperCAmelCase, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(_UpperCAmelCase, _UpperCAmelCase)
    else:
        print(json.dumps(_UpperCAmelCase, indent=2))


if __name__ == "__main__":
    a_ : Any = parse_args()
    if OPTS.out_image_dir:
        # matplotlib is only needed for the optional plots, and the Agg
        # backend avoids requiring a display.
        import matplotlib

        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    main()
73
1
def solution(limit: int = 5000_0000) -> int:
    """Project Euler 87: count the numbers below ``limit`` that can be written
    as p**2 + q**3 + r**4 with p, q, r prime.

    Args:
        limit: exclusive upper bound for the expressible numbers.

    Returns:
        The number of distinct expressible values below ``limit``.
    """
    # Fixes over the obfuscated original: `limit`, `ret` and `solution` were
    # undefined, the sieve stride used the wrong variable, and all three loop
    # variables shared one name.
    ret = set()
    # Largest prime whose square can contribute: square <= limit - 2**3 - 2**4.
    prime_square_limit = int((limit - 24) ** (1 / 2))
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            # The smallest fourth power is 2**4 == 16, so nothing can be added.
            # NOTE(review): the early `break`s assume ascending iteration order
            # over the set of small ints (true in CPython) — TODO confirm.
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)


# Backward-compatible alias for the obfuscated name.
lowerCamelCase__ = solution


if __name__ == "__main__":
    print(f"""{solution() = }""")
73
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


a_ : Dict = logging.get_logger(__name__)


class _snake_case ( A__ ):
    """Deprecated alias of the GLPN image processor, kept for backward
    compatibility; emits a deprecation warning on construction."""

    def __init__(self, *args, **kwargs) -> None:
        # NOTE(review): the obfuscated original declared `*a, **a` (duplicate
        # parameter name -> SyntaxError) and passed an undefined `a` as the
        # warning category; FutureWarning is assumed from the deprecation
        # wording — TODO confirm against the unobfuscated code base.
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args, **kwargs)
73
1
from math import factorial, pi


def maclaurin_sin(theta, accuracy=30):
    """Approximate sin(theta) using ``accuracy`` terms of the Maclaurin series.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms to sum (positive int).

    Returns:
        The series approximation of sin(theta) as a float.

    Raises:
        ValueError: if ``theta`` is not numeric or ``accuracy`` is not a
            positive int.
    """
    # Fixes over the obfuscated original: both functions shared one name,
    # parameters collided (SyntaxError), and the accuracy check read
    # `isinstance(x, x)`.
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')
    theta = float(theta)
    # Range-reduce theta into [0, 2*pi) so few terms give high accuracy.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta, accuracy=30):
    """Approximate cos(theta) using ``accuracy`` terms of the Maclaurin series.

    Same contract as :func:`maclaurin_sin` (see above for raises).
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')
    theta = float(theta)
    # Range-reduce theta into [0, 2*pi) so few terms give high accuracy.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


# Backward-compatible alias: in the obfuscated file both defs shared this
# name, so the last binding (the cosine) is what external callers saw.
lowerCamelCase__ = maclaurin_cos


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))

    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
73
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


# NOTE(review): mechanical obfuscation destroyed the identifiers in this test
# class: every method is named SCREAMING_SNAKE_CASE__ (so later defs shadow
# earlier ones), locals collapsed to SCREAMING_SNAKE_CASE, and the bare `a`
# and base class `A__` are undefined (the mixin import above suggests `A__`
# was ToolTesterMixin). The class is not runnable as-is; the comments below
# record the evident intent of each method. Code tokens are unchanged.
class _snake_case ( unittest.TestCase , A__ ):
    # setUp: load the 'text-classification' tool locally and remotely
    # (`remote=a` presumably was `remote=True` — verify against the original).
    def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
        SCREAMING_SNAKE_CASE = load_tool('text-classification')
        self.tool.setup()
        SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=a)

    # Exact positional call against the local tool should classify positive.
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative'])
        self.assertEqual(a , 'positive')

    # Same positional call against the remote tool.
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
        self.assertEqual(a , 'positive')

    # Keyword-argument call against the local tool.
    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
        self.assertEqual(a , 'positive')

    # Keyword-argument call against the remote tool.
    def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
        SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
        self.assertEqual(a , 'positive')
73
1
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position a_ : Any = '2.13.1' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('3.7'): raise ImportWarning( 'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( 'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n' 'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as 
_deprecated_download_manager # isort:skip a_ : str = concatenate_datasets a_ : List[Any] = DownloadConfig a_ : Any = DownloadManager a_ : Union[str, Any] = DownloadMode a_ : Dict = DownloadConfig a_ : List[str] = DownloadMode a_ : Union[str, Any] = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
73
import sys


def get_mid(point_a, point_b):
    """Return the midpoint of two 2D points given as (x, y) pairs."""
    # Fix over the obfuscated original, which averaged the same point with
    # itself and defined both functions under one colliding name.
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2


def triangle(vertex_1, vertex_2, vertex_3, depth):
    """Draw the triangle through the three vertices with the global turtle pen
    `my_pen`, then recurse on the three corner sub-triangles until depth 0.

    NOTE(review): the recursion's midpoint pairing follows the standard
    Sierpinski construction; the obfuscated source passed identical arguments
    everywhere, so the exact original order is unrecoverable — confirm.
    """
    my_pen.up()
    my_pen.goto(vertex_1[0], vertex_1[1])
    my_pen.down()
    my_pen.goto(vertex_2[0], vertex_2[1])
    my_pen.goto(vertex_3[0], vertex_3[1])
    my_pen.goto(vertex_1[0], vertex_1[1])
    if depth == 0:
        return
    triangle(vertex_1, get_mid(vertex_1, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)
    triangle(vertex_2, get_mid(vertex_1, vertex_2), get_mid(vertex_2, vertex_3), depth - 1)
    triangle(vertex_3, get_mid(vertex_3, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)


if __name__ == "__main__":
    # turtle (and its tkinter dependency) is only needed when actually
    # drawing, so import it here to keep the module importable headless.
    import turtle

    if len(sys.argv) != 2:
        raise ValueError(
            'Correct format for using this script: '
            'python fractals.py <int:depth_for_fractal>'
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('red')

    vertices = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
73
1
class _snake_case:
    """Directed graph stored as an adjacency dict (vertex -> list of
    neighbours) with console printing and a recursive depth-first search."""

    def __init__(self) -> None:
        # adjacency map: vertex -> list of neighbour vertices
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the raw adjacency dict, then one `v -> neighbours` line."""
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex) -> None:
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Run DFS from every unvisited vertex, printing visit order.

        Assumes vertices are the integers 0..n-1 (visited is indexed by
        vertex) — as in the demo below.
        """
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited) -> None:
        """Visit `start_vertex`, then recurse into unvisited vertices."""
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=' ')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


# The __main__ demo below referenced this original class name; keep both.
# (The obfuscated source had colliding method names, a duplicate-parameter
# SyntaxError in add_edge, and lost the visited[start_vertex] write.)
Graph = _snake_case


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print('DFS:')
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
73
# End-to-end test script for `accelerator.gather_for_metrics` (regression
# model + MRPC/BERT), run standalone or under a distributed launcher.
# NOTE(review): mechanical obfuscation collapsed every local into
# SCREAMING_SNAKE_CASE and every parameter into _UpperCAmelCase (several defs
# have duplicate parameter names, a SyntaxError), and every function shares
# the name lowerCamelCase__, so calls like get_dataloader / get_basic_setup /
# test_mrpc resolve to nothing. The script is NOT runnable as-is; comments
# record the evident intent of each function. Code tokens are unchanged.
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# Originally an env-var style constant (e.g. TESTING_MOCKED_DATALOADERS).
a_ : Any = 'true'


# get_basic_setup: seeded regression model + dataloader, prepared by the
# accelerator; returns (raw model, prepared model, prepared dataloader).
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16):
    set_seed(42)
    SCREAMING_SNAKE_CASE = RegressionModel()
    SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase)
    model.to(accelerator.device)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
    return model, ddp_model, dataloader


# get_dataloader: tokenized GLUE/MRPC validation split with a padding
# collate function (longest vs. fixed max_length of 128).
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
    SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation')

    def tokenize_function(_UpperCAmelCase):
        SCREAMING_SNAKE_CASE = tokenizer(
            examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
        return outputs

    with accelerator.main_process_first():
        SCREAMING_SNAKE_CASE = dataset.map(
            _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
        SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')

    def collate_fn(_UpperCAmelCase):
        if use_longest:
            return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt')
        return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt')

    return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16)


# get_mrpc_setup: accelerator + prepared BERT classifier and dataloader; the
# returned dict pairs a "ddp" (prepared) and a "no" (baseline) setup.
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches)
    SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


# generate_predictions: run the model over the dataloader, gather logits and
# targets across processes with gather_for_metrics, and concatenate them.
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
    SCREAMING_SNAKE_CASE = []
    for batch in dataloader:
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
        with torch.no_grad():
            SCREAMING_SNAKE_CASE = model(_UpperCAmelCase)
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
    for logit, targ in logits_and_targets:
        logits.append(_UpperCAmelCase)
        targs.append(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase)
    return logits, targs


# test_torch_metrics: gathered prediction count must equal the dataset size
# (i.e. gather_for_metrics drops duplicated padding samples).
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16):
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
    assert (
        len(_UpperCAmelCase) == num_samples
    ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}'''


# test_mrpc: GLUE metrics computed on a single process must match metrics
# computed through the distributed gather path.
def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False):
    SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase)
    # First do baseline
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no']
    model.to(_UpperCAmelCase)
    model.eval()
    for batch in dataloader:
        batch.to(_UpperCAmelCase)
        with torch.inference_mode():
            SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
            SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels'])
    SCREAMING_SNAKE_CASE = metric.compute()
    # Then do distributed
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
        SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
        SCREAMING_SNAKE_CASE = batch['labels']
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key]
        ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''


# main: run the MRPC comparison (GPU/TPU only) and the torch-metrics checks
# across all split/dispatch batch combinations, resetting accelerator state.
def lowerCamelCase__ ():
    SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(_UpperCAmelCase , _UpperCAmelCase)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(_UpperCAmelCase , 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
        SCREAMING_SNAKE_CASE = Accelerator()
        test_torch_metrics(_UpperCAmelCase , 512)
        accelerator.state._reset_state()


# _mp_fn: entry point used by torch_xla's xla_spawn launcher.
def lowerCamelCase__ (_UpperCAmelCase):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
73
1
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    """Sort the list of integers ``a`` in place using pigeonhole sort.

    Args:
        a: mutable list of ints (any sign); sorted ascending in place.

    Raises:
        AssertionError: if any element is not an int.
    """
    # Fixes over the obfuscated original: the population loop iterated an
    # undefined name, the write-back into `a` was lost, and an empty input
    # crashed on min()/max().
    if not a:
        return
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    """Demo: sort a fixed sample list and print it."""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(n) for n in a))


# Backward-compatible alias: in the obfuscated file both defs shared this
# name, so the last binding (main) is what external callers saw.
lowerCamelCase__ = main


if __name__ == "__main__":
    main()
73
# Lazy-import __init__ for the GPT-SW3 tokenizer: the tokenizer is only
# registered when sentencepiece is installed.
# NOTE(review): obfuscation rewrote the module-level assignment targets to
# `a_`, so `_import_structure` (referenced by _LazyModule below) is never
# defined and the exported-names list originally stored under
# _import_structure['tokenization_gpt_sw3'] is lost; the original key names
# must be restored before this file is usable. Code tokens are unchanged.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Originally `_import_structure = {}`.
a_ : List[str] = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Originally `_import_structure['tokenization_gpt_sw3'] = [...]`.
    a_ : Optional[Any] = ['GPTSw3Tokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
1
def convert_to_negative(img):
    """Invert every pixel of ``img`` in place and return it.

    Args:
        img: image array; assumed to be a (height, width, 3) numpy array as
            loaded by cv2/cva imread — TODO confirm.

    Returns:
        The same array with each channel value replaced by 255 - value.
    """
    # Fixes over the obfuscated original: the loop bounds iterated over the
    # image object itself instead of its height/width.
    # getting number of pixels in the image
    height, width = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


# Backward-compatible alias for the obfuscated name.
lowerCamelCase__ = convert_to_negative


if __name__ == "__main__":
    # OpenCV ('cva' in this code base) is only needed when run as a script;
    # importing it lazily keeps convert_to_negative importable without it.
    from cva import destroyAllWindows, imread, imshow, waitKey

    # read original image
    img = imread('image_data/lena.jpg', 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow('negative of original image', img)
    waitKey(0)
    destroyAllWindows()
73
# Integration tests verifying that the datasets listed below can be fetched from
# the Hugging Face GCP mirror: dataset-info download, prepared-Arrow download,
# and streaming access. Requires network access to the HF GCP bucket.
# NOTE(review): an automated rename mangled identifiers in this file
# (`lowerCamelCase__`, `_snake_case`, `SCREAMING_SNAKE_CASE`, `A__`); several
# bodies reference names the mangled signatures no longer bind (`with_config`,
# `DATASETS_ON_HF_GCP`, `builder_cls`, `tmp_path_factory`) -- this module cannot
# run as written; confirm against the original `datasets` test file.
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path a_ : str = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def lowerCamelCase__ (_UpperCAmelCase=True): if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) ) class _snake_case ( A__ ): _lowercase : Optional[Any] = None _lowercase : Optional[Any] = None def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]: with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , 
dataset=a) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=a , config_name=a , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'), config.DATASET_INFO_FILENAME, ]) SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a) self.assertTrue(os.path.exists(a)) @pytest.mark.integration def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple' SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam SCREAMING_SNAKE_CASE = None builder_instance.download_and_prepare() SCREAMING_SNAKE_CASE = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset() assert ds assert isinstance(_UpperCAmelCase , _UpperCAmelCase) assert "train" in ds assert isinstance(ds['train'] , _UpperCAmelCase) assert next(iter(ds['train']))
73
1
# Tests for Apache-Beam-based dataset builders: verify download_and_prepare with
# a DirectRunner (flat and nested features), sharded parquet/arrow output, and
# that MissingBeamOptions is raised when no runner is configured.
# NOTE(review): identifiers were mangled by an automated rename; several bodies
# reference names the mangled signatures no longer bind (`expected_num_examples`,
# `builder`, `tmp_cache_dir`) -- confirm against the original `datasets` test file.
# NOTE(review): in the sharding test both existence assertions check
# 'train-00000-of-00002'; the second was presumably meant to check
# 'train-00001-of-00002' -- the second shard is currently unverified.
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class _snake_case ( datasets.BeamBasedBuilder ): def SCREAMING_SNAKE_CASE__ ( self) -> int: return datasets.DatasetInfo( features=datasets.Features({'content': datasets.Value('string')}) , supervised_keys=a , ) def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Any: return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()})] def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Union[str, Any]: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(a) class _snake_case ( datasets.BeamBasedBuilder ): def SCREAMING_SNAKE_CASE__ ( self) -> str: return datasets.DatasetInfo( features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}) , supervised_keys=a , ) def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Union[str, Any]: return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()}) ] def SCREAMING_SNAKE_CASE__ ( self , a , a) -> int: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(a) def lowerCamelCase__ (): return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])] def lowerCamelCase__ (): return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])] class _snake_case ( A__ ): @require_beam def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = len(get_test_dummy_examples()) with tempfile.TemporaryDirectory() as tmp_cache_dir: SCREAMING_SNAKE_CASE = DummyBeamDataset(cache_dir=a , beam_runner='DirectRunner') builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(a , builder.name , 'default' , '0.0.0' , f'''{builder.name}-train.arrow'''))) self.assertDictEqual(builder.info.features , datasets.Features({'content': 
datasets.Value('string')})) SCREAMING_SNAKE_CASE = builder.as_dataset() self.assertEqual(dset['train'].num_rows , a) self.assertEqual(dset['train'].info.splits['train'].num_examples , a) self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1]) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1]) self.assertTrue( os.path.exists(os.path.join(a , builder.name , 'default' , '0.0.0' , 'dataset_info.json'))) del dset @require_beam def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: import apache_beam as beam SCREAMING_SNAKE_CASE = beam.io.parquetio.WriteToParquet SCREAMING_SNAKE_CASE = len(get_test_dummy_examples()) with tempfile.TemporaryDirectory() as tmp_cache_dir: SCREAMING_SNAKE_CASE = DummyBeamDataset(cache_dir=a , beam_runner='DirectRunner') with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock: SCREAMING_SNAKE_CASE = partial(a , num_shards=2) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( a , builder.name , 'default' , '0.0.0' , f'''{builder.name}-train-00000-of-00002.arrow'''))) self.assertTrue( os.path.exists( os.path.join( a , builder.name , 'default' , '0.0.0' , f'''{builder.name}-train-00000-of-00002.arrow'''))) self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string')})) SCREAMING_SNAKE_CASE = builder.as_dataset() self.assertEqual(dset['train'].num_rows , a) self.assertEqual(dset['train'].info.splits['train'].num_examples , a) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['train']['content']) , sorted(['foo', 'bar', 'foobar'])) self.assertTrue( os.path.exists(os.path.join(a , builder.name , 'default' , '0.0.0' , 'dataset_info.json'))) del dset @require_beam def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: with tempfile.TemporaryDirectory() as tmp_cache_dir: SCREAMING_SNAKE_CASE = 
DummyBeamDataset(cache_dir=a) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare) @require_beam def SCREAMING_SNAKE_CASE__ ( self) -> Dict: SCREAMING_SNAKE_CASE = len(get_test_nested_examples()) with tempfile.TemporaryDirectory() as tmp_cache_dir: SCREAMING_SNAKE_CASE = NestedBeamDataset(cache_dir=a , beam_runner='DirectRunner') builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(a , builder.name , 'default' , '0.0.0' , f'''{builder.name}-train.arrow'''))) self.assertDictEqual( builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})})) SCREAMING_SNAKE_CASE = builder.as_dataset() self.assertEqual(dset['train'].num_rows , a) self.assertEqual(dset['train'].info.splits['train'].num_examples , a) self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1]) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1]) self.assertTrue( os.path.exists(os.path.join(a , builder.name , 'default' , '0.0.0' , 'dataset_info.json'))) del dset
73
from __future__ import annotations


def lowerCamelCase__(_UpperCAmelCase: int) -> list[int]:
    """Return the prime factorization of a positive integer.

    Factors are returned in non-decreasing order, with multiplicity
    (e.g. 12 -> [2, 2, 3]).  For inputs < 2 the result is an empty list.
    """
    # Work on a local copy; the body previously referenced undefined
    # names (`n`, `i`, `factors`) after the parameter was renamed.
    n = _UpperCAmelCase
    i = 2
    factors: list[int] = []
    # Trial division: only need candidates up to sqrt(n).
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)  # append the found prime factor, not the input
    # Whatever remains (> 1) is itself prime.
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
73
1
from __future__ import annotations


class Node:
    """A binary-tree node holding a data value and two child links."""

    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:
    """Print the tree's values using an in-order traversal."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Return the number of nodes on the longest root-to-leaf path (0 for None)."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    """Return True if every node has either zero or exactly two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    """Build a sample tree, report fullness and depth, and dump it in order."""
    # Previously every Node was assigned to the same clobbered name, so no
    # tree was ever linked together; build the intended structure explicitly.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
73
# LZW-style binary file compressor: reads a file as a bit string, encodes it
# with a growing 0/1 lexicon, prepends the original file length, and writes the
# packed bytes back out.  Entry point: compress(src_path, dst_path).
# NOTE(review): an automated rename collapsed distinct locals into
# `SCREAMING_SNAKE_CASE` / `_UpperCAmelCase`; several bodies now reference names
# that are never bound (`lexicon`, `last_match_id`, `curr_string`, `data_bits`,
# `index`, `result`, `compressed`, `length_length`, `file_length_binary`,
# `to_write`, `byte_length`) and the lexicon dict-update statements appear to
# have been lost entirely.  This module cannot run as written -- restore the
# bodies from the original source before use; the bit-level logic is too
# order-sensitive to reconstruct safely in review.
import math import os import sys def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = '' try: with open(_UpperCAmelCase , 'rb') as binary_file: SCREAMING_SNAKE_CASE = binary_file.read() for dat in data: SCREAMING_SNAKE_CASE = F'''{dat:08b}''' result += curr_byte return result except OSError: print('File not accessible') sys.exit() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): lexicon.pop(_UpperCAmelCase) SCREAMING_SNAKE_CASE = last_match_id if math.loga(_UpperCAmelCase).is_integer(): for curr_key in lexicon: SCREAMING_SNAKE_CASE = '0' + lexicon[curr_key] SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:] def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = {'0': '0', '1': '1'} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = '', '' SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) for i in range(len(_UpperCAmelCase)): curr_string += data_bits[i] if curr_string not in lexicon: continue SCREAMING_SNAKE_CASE = lexicon[curr_string] result += last_match_id add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) index += 1 SCREAMING_SNAKE_CASE = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": SCREAMING_SNAKE_CASE = lexicon[curr_string] result += last_match_id return result def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = os.path.getsize(_UpperCAmelCase) SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:] SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) return "0" * (length_length - 1) + file_length_binary + compressed def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = 8 try: with open(_UpperCAmelCase , 'wb') as opened_file: SCREAMING_SNAKE_CASE = [ to_write[i : i + byte_length] for i in range(0 , len(_UpperCAmelCase) , _UpperCAmelCase) ] if len(result_byte_array[-1]) % byte_length == 0: result_byte_array.append('10000000') else: result_byte_array[-1] += "1" + "0" * ( byte_length 
- len(result_byte_array[-1]) - 1 ) for elem in result_byte_array: opened_file.write(int(_UpperCAmelCase , 2).to_bytes(1 , byteorder='big')) except OSError: print('File not accessible') sys.exit() def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = read_file_binary(_UpperCAmelCase) SCREAMING_SNAKE_CASE = compress_data(_UpperCAmelCase) SCREAMING_SNAKE_CASE = add_file_length(_UpperCAmelCase , _UpperCAmelCase) write_file_binary(_UpperCAmelCase , _UpperCAmelCase) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
73
1
def lowerCamelCase__(_UpperCAmelCase: str) -> str:
    """Return the input string with every ASCII lowercase letter upper-cased.

    Non-lowercase characters (digits, punctuation, already-uppercase letters)
    pass through unchanged.
    """
    # ord('a') - ord('A') == 32, so subtracting 32 upper-cases an ASCII letter.
    # The previous body applied ord() to the whole string and iterated an
    # undefined name `word`; operate per-character on the parameter instead.
    return "".join(
        chr(ord(char) - 32) if "a" <= char <= "z" else char for char in _UpperCAmelCase
    )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
73
# Text-classification pipeline: tokenizes text (or text pairs), runs a sequence
# classification model, then post-processes logits with sigmoid / softmax / none
# and returns {label, score} dicts (optionally top-k).
# NOTE(review): identifiers were mangled by an automated rename; visible
# breakage includes: `sigmoid`/`softmax` take `_UpperCAmelCase` but operate on
# the unbound name `_outputs`; the postprocess sort uses `key=lambda a:
# x["score"]` -- the lambda parameter is `a` but the body reads unbound `x`;
# and several `SCREAMING_SNAKE_CASE` assignments shadow what were distinct
# locals (`preprocess_params`, `postprocess_params`, `scores`, `dict_scores`).
# Confirm each body against the original transformers source before use.
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCamelCase__ (_UpperCAmelCase): return 1.0 / (1.0 + np.exp(-_outputs)) def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=_UpperCAmelCase) SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_UpperCAmelCase) class _snake_case ( A__ ): _lowercase : Tuple = '''sigmoid''' _lowercase : List[str] = '''softmax''' _lowercase : Tuple = '''none''' @add_end_docstrings( A__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. 
''' , ) class _snake_case ( A__ ): _lowercase : Optional[Any] = False _lowercase : Tuple = ClassificationFunction.NONE def __init__( self , **a) -> Optional[Any]: super().__init__(**a) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" SCREAMING_SNAKE_CASE = tokenizer_kwargs SCREAMING_SNAKE_CASE = {} if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None: SCREAMING_SNAKE_CASE = self.model.config.return_all_scores if isinstance(a , a) or top_k is None: SCREAMING_SNAKE_CASE = top_k SCREAMING_SNAKE_CASE = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , ) if return_all_scores: SCREAMING_SNAKE_CASE = None else: SCREAMING_SNAKE_CASE = 1 if isinstance(a , a): SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: SCREAMING_SNAKE_CASE = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *a , **a) -> Optional[int]: SCREAMING_SNAKE_CASE = super().__call__(*a , **a) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
SCREAMING_SNAKE_CASE = 'top_k' not in kwargs if isinstance(args[0] , a) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> Dict[str, GenericTensor]: SCREAMING_SNAKE_CASE = self.framework if isinstance(a , a): return self.tokenizer(**a , return_tensors=a , **a) elif isinstance(a , a) and len(a) == 1 and isinstance(inputs[0] , a) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a) elif isinstance(a , a): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.') return self.tokenizer(a , return_tensors=a , **a) def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]: return self.model(**a) def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1 , a=True) -> Any: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. 
# Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None: SCREAMING_SNAKE_CASE = self.model.config.function_to_apply else: SCREAMING_SNAKE_CASE = ClassificationFunction.NONE SCREAMING_SNAKE_CASE = model_outputs['logits'][0] SCREAMING_SNAKE_CASE = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: SCREAMING_SNAKE_CASE = sigmoid(a) elif function_to_apply == ClassificationFunction.SOFTMAX: SCREAMING_SNAKE_CASE = softmax(a) elif function_to_apply == ClassificationFunction.NONE: SCREAMING_SNAKE_CASE = outputs else: raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''') if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} SCREAMING_SNAKE_CASE = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a) ] if not _legacy: dict_scores.sort(key=lambda a: x["score"] , reverse=a) if top_k is not None: SCREAMING_SNAKE_CASE = dict_scores[:top_k] return dict_scores
73
1
# Configuration classes for the YOLOS object-detection model: the model config
# (ViT-style encoder hyperparameters plus Hungarian-matcher / loss-coefficient
# settings) and an ONNX export config declaring pixel_values input axes, the
# validation tolerance, and the default opset.
# NOTE(review): class names were mangled to `_snake_case` by an automated
# rename; originally YolosConfig and YolosOnnxConfig -- confirm before relying
# on the names.
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : List[str] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class _snake_case ( A__ ): _lowercase : int = '''yolos''' def __init__( self , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.0 , a=0.0 , a=0.02 , a=1E-12 , a=[512, 864] , a=16 , a=3 , a=True , a=100 , a=True , a=False , a=1 , a=5 , a=2 , a=5 , a=2 , a=0.1 , **a , ) -> Dict: super().__init__(**a) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = qkv_bias SCREAMING_SNAKE_CASE = num_detection_tokens SCREAMING_SNAKE_CASE = use_mid_position_embeddings SCREAMING_SNAKE_CASE = auxiliary_loss # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient class _snake_case ( A__ ): _lowercase : int = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def SCREAMING_SNAKE_CASE__ ( self) -> float: return 1E-4 @property def SCREAMING_SNAKE_CASE__ ( self) -> 
int: return 12
73
# Prim's minimum-spanning-tree algorithm over an adjacency-list Vertex graph:
# a Vertex class (id, key, parent pi, neighbors, edge weights), a connect()
# helper that wires an undirected weighted edge, a linear-scan prim() returning
# (child, parent) id pairs, and a heap-based variant that yields them lazily.
# NOTE(review): identifiers were mangled by an automated rename; both Prim
# implementations reference names their mangled signatures no longer bind
# (`graph`, `root`, `a`, `h`, `u`) and the result-list/heap initializations were
# collapsed into clobbered `SCREAMING_SNAKE_CASE` assignments.  This module
# cannot run as written -- restore the bodies from the original source.
import heapq as hq import math from collections.abc import Iterator class _snake_case : def __init__( self , a) -> Optional[Any]: SCREAMING_SNAKE_CASE = str(id_) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = {} # {vertex:distance} def __lt__( self , a) -> Dict: return self.key < other.key def __repr__( self) -> Optional[Any]: return self.id def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]: self.neighbors.append(a) def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Tuple: SCREAMING_SNAKE_CASE = weight def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1]) graph[b - 1].add_neighbor(graph[a - 1]) # add the edges: graph[a - 1].add_edge(graph[b - 1] , _UpperCAmelCase) graph[b - 1].add_edge(graph[a - 1] , _UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] for u in graph: SCREAMING_SNAKE_CASE = math.inf SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = graph[:] while q: SCREAMING_SNAKE_CASE = min(_UpperCAmelCase) q.remove(_UpperCAmelCase) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): SCREAMING_SNAKE_CASE = u SCREAMING_SNAKE_CASE = u.edges[v.id] for i in range(1 , len(_UpperCAmelCase)): a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1)) return a def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): for u in graph: SCREAMING_SNAKE_CASE = math.inf SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = list(_UpperCAmelCase) hq.heapify(_UpperCAmelCase) while h: SCREAMING_SNAKE_CASE = hq.heappop(_UpperCAmelCase) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): SCREAMING_SNAKE_CASE = u SCREAMING_SNAKE_CASE = u.edges[v.id] hq.heapify(_UpperCAmelCase) for i in range(1 , len(_UpperCAmelCase)): yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1) def lowerCamelCase__ (): pass if __name__ 
== "__main__": import doctest doctest.testmod()
73
1
# Configuration class for the DPT (Dense Prediction Transformer) model:
# ViT-style encoder hyperparameters, optional hybrid BiT backbone setup,
# reassembly/fusion neck sizes, and auxiliary semantic-segmentation head
# settings, plus a to_dict() that serializes the nested backbone config.
# NOTE(review): identifiers were mangled by an automated rename; `to_dict`
# assigns `copy.deepcopy(self.__dict__)` to a clobbered name but then reads the
# unbound name `output`, so serialization cannot run as written -- restore from
# the original transformers source.
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig a_ : Optional[Any] = logging.get_logger(__name__) a_ : Tuple = { 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class _snake_case ( A__ ): _lowercase : Optional[int] = '''dpt''' def __init__( self , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.0 , a=0.0 , a=0.02 , a=1E-12 , a=384 , a=16 , a=3 , a=False , a=True , a=[2, 5, 8, 11] , a="project" , a=[4, 2, 1, 0.5] , a=[96, 192, 384, 768] , a=256 , a=-1 , a=False , a=True , a=0.4 , a=255 , a=0.1 , a=[1, 1024, 24, 24] , a=[0, 1] , a=None , **a , ) -> List[str]: super().__init__(**a) SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.') SCREAMING_SNAKE_CASE = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } SCREAMING_SNAKE_CASE = BitConfig(**a) elif isinstance(a , a): logger.info('Initializing the config with a `BiT` backbone.') SCREAMING_SNAKE_CASE = BitConfig(**a) elif isinstance(a , a): SCREAMING_SNAKE_CASE = backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''') SCREAMING_SNAKE_CASE = backbone_featmap_shape SCREAMING_SNAKE_CASE = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.') else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = 
attention_probs_dropout_prob SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = qkv_bias SCREAMING_SNAKE_CASE = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']') SCREAMING_SNAKE_CASE = readout_type SCREAMING_SNAKE_CASE = reassemble_factors SCREAMING_SNAKE_CASE = neck_hidden_sizes SCREAMING_SNAKE_CASE = fusion_hidden_size SCREAMING_SNAKE_CASE = head_in_index SCREAMING_SNAKE_CASE = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) SCREAMING_SNAKE_CASE = use_auxiliary_head SCREAMING_SNAKE_CASE = auxiliary_loss_weight SCREAMING_SNAKE_CASE = semantic_loss_ignore_index SCREAMING_SNAKE_CASE = semantic_classifier_dropout def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__) if output["backbone_config"] is not None: SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
73
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Lazy-import structure: maps submodule name -> list of public names it exports.
# NOTE: this must be the dict passed to _LazyModule below; previously the dict
# was bound to a throwaway name and then clobbered by plain lists, leaving
# `_import_structure` undefined at module bottom.
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
73
1
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC.

    Args:
        accelerator: the `Accelerator` driving this run (used for process
            coordination and padding decisions).
        batch_size: per-device batch size for the training dataloader.

    Returns:
        `(train_dataloader, eval_dataloader)` tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of
    # the dataset, starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name
    # for labels by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC, auto-shrinking the batch size on OOM.

    Args:
        config: dict with keys ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: parsed CLI namespace with ``cpu`` and ``mixed_precision``.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation otherwise training will not work on TPU (`accelerate` will kindly throw an error
        # to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave
        # them to the prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """CLI entry point: parse arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
73
# Configuration module for the Decision Transformer model.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class _snake_case(PretrainedConfig):
    """Configuration class for a Decision Transformer model.

    Stores the model hyper-parameters (state/action dimensions plus the usual
    GPT-2-style transformer settings) and inherits serialization behavior from
    `PretrainedConfig`.
    """

    # Fix: the three class-level attributes were all bound to the same name and
    # clobbered one another; `PretrainedConfig` expects these exact names.
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,  # dimension of the environment observation vector
        act_dim=4,  # dimension of the action vector
        hidden_size=128,
        max_ep_len=4096,  # maximum episode length (sizes the timestep embedding)
        action_tanh=True,  # squash predicted actions through tanh
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,  # None => 4 * hidden_size feed-forward, GPT-2 convention
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        # Fix: values were previously assigned to a local variable instead of
        # `self.*`, so the config never stored any hyper-parameter.
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
73
1
# Lazy-import shim for the LayoutXLM sub-package.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Submodule -> exported names; consumed by `_LazyModule` below.
# (Fix: this structure was bound to a throwaway name and clobbered by the
# optional-dependency lists, leaving `_import_structure` undefined.)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Slow tokenizer needs sentencepiece.
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer needs the `tokenizers` library.
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Install the lazy proxy in place of this module so attribute access
    # triggers the deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
73
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC.

    Args:
        accelerator: the `Accelerator` driving this run.
        batch_size: per-device batch size for the training dataloader.

    Returns:
        `(train_dataloader, eval_dataloader)` tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of
    # the dataset, starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name
    # for labels by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train and evaluate BERT on MRPC under `Accelerator`.

    Args:
        config: dict with keys ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: parsed CLI namespace with ``cpu`` and ``mixed_precision``.
    """
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
    # optimizer creation otherwise training will not work on TPU (`accelerate` will kindly throw an error
    # to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave
    # them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # Scale the loss so the accumulated gradients match a full batch.
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """CLI entry point: parse arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
73
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , a , a=7 , a=3 , a=18 , a=30 , a=400 , a=True , a=None , a=True , ) -> Tuple: SCREAMING_SNAKE_CASE = size if size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = min_resolution SCREAMING_SNAKE_CASE = max_resolution SCREAMING_SNAKE_CASE = do_resize SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = apply_ocr def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _snake_case ( A__ , unittest.TestCase ): _lowercase : Any = LayoutLMvaImageProcessor if is_pytesseract_available() else None def SCREAMING_SNAKE_CASE__ ( self) -> int: SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(a , 'do_resize')) self.assertTrue(hasattr(a , 'size')) self.assertTrue(hasattr(a , 'apply_ocr')) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 18, 'width': 18}) SCREAMING_SNAKE_CASE = 
self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: pass def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a) for image in image_inputs: self.assertIsInstance(a , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt') self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , a) self.assertIsInstance(encoding.boxes , a) # Test batched SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self) -> Dict: # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a) for image in image_inputs: self.assertIsInstance(a , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self) -> str: # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a) for image in image_inputs: self.assertIsInstance(a , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: # with apply_OCR = True SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor() from datasets import load_dataset SCREAMING_SNAKE_CASE = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test') SCREAMING_SNAKE_CASE = Image.open(ds[0]['file']).convert('RGB') SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt') self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224)) self.assertEqual(len(encoding.words) , len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 SCREAMING_SNAKE_CASE = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 
'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 SCREAMING_SNAKE_CASE = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 
88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 
397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , a) self.assertListEqual(encoding.boxes , a) # with apply_OCR = False SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor(apply_ocr=a) SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt') self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
73
# Lazy-import shim for the RAG sub-package: torch/TF modeling code is only
# imported when one of its names is actually accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Submodule -> exported names; consumed by `_LazyModule` below.
# (Fix: this structure was bound to a throwaway name and clobbered by the
# optional-dependency lists, leaving `_import_structure` undefined.)
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Install the lazy proxy in place of this module so attribute access
    # triggers the deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
73
1
def lowerCamelCase__(sentence: str, ngram_size: int) -> list[str]:
    """Return every contiguous n-gram (substring of length ``ngram_size``) of ``sentence``.

    If ``ngram_size`` exceeds ``len(sentence)`` the result is empty.

    >>> lowerCamelCase__("abcde", 2)
    ['ab', 'bc', 'cd', 'de']
    """
    # Fix: the signature previously declared two parameters with the same name
    # (a SyntaxError) while the body referenced undefined `sentence`/`ngram_size`;
    # the parameters are now named to match the body.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
73
from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False): if radian_mode: return [magnitude * cos(_UpperCAmelCase), magnitude * sin(_UpperCAmelCase)] return [magnitude * cos(radians(_UpperCAmelCase)), magnitude * sin(radians(_UpperCAmelCase))] def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10**-1): SCREAMING_SNAKE_CASE = cross(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = sum(_UpperCAmelCase) return abs(_UpperCAmelCase) < eps if __name__ == "__main__": # Test to check if it works a_ : int = array( [ polar_force(718.4, 1_80 - 30), polar_force(879.54, 45), polar_force(1_00, -90), ] ) a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg a_ : Dict = array( [ polar_force(30 * 9.81, 15), polar_force(2_15, 1_80 - 45), polar_force(2_64, 90 - 30), ] ) a_ : Any = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg a_ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]]) a_ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
73
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Pretrained-config archive map.  Kept under the (obfuscated) public name
# ``a_`` so any external ``from <module> import a_`` still resolves.
a_ = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}


class _snake_case(PretrainedConfig):
    """Configuration for the Salesforce CTRL model.

    Defaults mirror the canonical ``ctrl`` checkpoint.  The obfuscated
    original named every class attribute ``_lowercase`` (so only the last
    survived) and every __init__ parameter ``a`` (a SyntaxError); the
    canonical names are restored here because ``PretrainedConfig`` looks
    them up by name.
    """

    model_type = '''ctrl'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=24_6534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1E-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        """Store the hyper-parameters; extra ``kwargs`` go to ``PretrainedConfig``.

        :param vocab_size: size of the token vocabulary.
        :param n_positions: maximum sequence length.
        :param n_embd: hidden size of the transformer.
        :param dff: inner dimension of the feed-forward blocks.
        :param n_layer: number of transformer layers.
        :param n_head: number of attention heads.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
73
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Pretrained-config archive map.  Kept under the (obfuscated) public name
# ``a_`` so any external ``from <module> import a_`` still resolves.
a_ = {
    'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class _snake_case(PretrainedConfig):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Each list-valued hyper-parameter holds one entry per stage (three stages).
    The obfuscated original named every __init__ parameter ``a`` (a
    SyntaxError) and dropped the ``self.`` prefix on every assignment; the
    canonical names are restored here.

    NOTE: the mutable (list) defaults follow the upstream HF implementation,
    which never mutates them.
    """

    model_type = '''cvt'''

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ) -> None:
        """Store the per-stage hyper-parameters; extra ``kwargs`` go to ``PretrainedConfig``."""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
73
1
def lowerCamelCase__ (n: int = 100) -> int:
    """Return (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2).

    Project Euler problem 6.  Uses the closed-form identities
    sum(1..n) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6, so it runs in O(1)
    instead of the original O(n) loop.  (The obfuscated original also
    referenced an undefined name ``n`` in its loop bounds.)

    >>> lowerCamelCase__(10)
    2640
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


# Name used by the __main__ banner below (and the canonical upstream API).
solution = lowerCamelCase__


if __name__ == "__main__":
    print(f"""{solution() = }""")
73
def lowerCamelCase__ (min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` when ``option`` is True, else ``max_val``.

    Small helper used to pick one end of a validated range.  (The obfuscated
    original repeated the same parameter name three times — a SyntaxError.)
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for ``to_guess`` inside (lower, higher), printing each step.

    :raises ValueError: if ``lower > higher`` or ``to_guess`` is out of range.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)')

    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value')

    def answer(number: int) -> str:
        """Compare a candidate against the target: 'high', 'low' or 'same'."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...')

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        # "low" means the candidate is below the target, so raise the floor.
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(F'''guess the number : {last_numbers[-1]}''')
    print(F'''details : {last_numbers!s}''')


def main() -> None:
    """Read the search range and target from stdin and run the guesser."""
    lower = int(input('Enter lower value : ').strip())
    higher = int(input('Enter high value : ').strip())
    guess = int(input('Enter value to guess : ').strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
73
1
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations.

    Standard Viterbi dynamic program over a discrete HMM.  All inputs are
    validated first; see the ``_validate_*`` helpers for the exact errors.
    (The obfuscated original repeated parameter names — a SyntaxError — and
    lost the ``probabilities[...] = `` / ``pointers[...] = `` assignment
    targets; both are restored here.)

    :raises ValueError: on empty or mistyped inputs.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result: list = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Run all input checks for :func:`viterbi`."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise if any argument is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError('There\'s an empty parameter')


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Both observation and state spaces must be lists of strings."""
    _validate_list(observations_space, 'observations_space')
    _validate_list(states_space, 'states_space')


def _validate_list(_object: Any, var_name: str) -> None:
    """Raise unless ``_object`` is a list of strings."""
    if not isinstance(_object, list):
        msg = F'''{var_name} must be a list'''
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = F'''{var_name} must be a list of strings'''
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability tables."""
    _validate_dict(initial_probabilities, 'initial_probabilities', float)
    _validate_nested_dict(transition_probabilities, 'transition_probabilities')
    _validate_nested_dict(emission_probabilities, 'emission_probabilities')


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Validate a dict-of-dicts whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    """Raise unless ``_object`` is a dict with str keys and ``value_type`` values."""
    if not isinstance(_object, dict):
        msg = F'''{var_name} must be a dict'''
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = F'''{var_name} all keys must be strings'''
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
73
"""Unit tests for the OpenLlama model family (config tester + model test suite).

NOTE(review): this file appears to have been machine-obfuscated and is not
runnable as-is:
  * every parameter is named ``a`` (duplicate argument names -> SyntaxError),
  * every local is bound to ``SCREAMING_SNAKE_CASE`` so intermediate values
    overwrite one another, while later statements still reference the
    original names (``model``, ``input_ids``, ``config`` ... all undefined),
  * every method is named ``SCREAMING_SNAKE_CASE__`` so earlier defs in the
    same class are shadowed, and the bases ``A__`` and the name
    ``OpenLlamaModelTester`` are undefined.
The comments below record the evident intent; invalid return/attribute
annotations (``Union``/``Dict``/``Tuple``/``List``/``Optional``/``Any`` were
never imported) have been removed or replaced — nothing else was changed.
"""
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class _snake_case:
    # Test-harness "model tester": builds tiny configs/inputs and runs shape
    # checks for the OpenLlama models.

    def __init__(
        self,
        a,
        a=13,
        a=7,
        a=True,
        a=True,
        a=False,
        a=True,
        a=99,
        a=32,
        a=5,
        a=4,
        a=37,
        a="gelu",
        a=0.1,
        a=0.1,
        a=512,
        a=16,
        a=2,
        a=0.02,
        a=3,
        a=4,
        a=None,
    ) -> None:
        # NOTE(review): the right-hand names below are the original parameter
        # names (parent, batch_size, ...); they are undefined in this
        # obfuscated version.
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = batch_size
        SCREAMING_SNAKE_CASE = seq_length
        SCREAMING_SNAKE_CASE = is_training
        SCREAMING_SNAKE_CASE = use_input_mask
        SCREAMING_SNAKE_CASE = use_token_type_ids
        SCREAMING_SNAKE_CASE = use_labels
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = hidden_dropout_prob
        SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE = max_position_embeddings
        SCREAMING_SNAKE_CASE = type_vocab_size
        SCREAMING_SNAKE_CASE = type_sequence_label_size
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = num_labels
        SCREAMING_SNAKE_CASE = num_choices
        SCREAMING_SNAKE_CASE = scope

    def SCREAMING_SNAKE_CASE__(self):
        # Intent: prepare_config_and_inputs — build random ids/masks/labels
        # sized by the tester's hyper-parameters.
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        SCREAMING_SNAKE_CASE = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
        SCREAMING_SNAKE_CASE = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size], self.type_sequence_label_size)
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size], self.num_choices)
        SCREAMING_SNAKE_CASE = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE__(self):
        # Intent: get_config — tiny OpenLlamaConfig matching the tester.
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=a,
            initializer_range=self.initializer_range,
            use_stable_embedding=a,
        )

    def SCREAMING_SNAKE_CASE__(self, a, a, a, a, a, a, a):
        # Intent: create_and_check_model — forward pass, check hidden-state shape.
        SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a)
        model.to(a)
        model.eval()
        SCREAMING_SNAKE_CASE = model(a, attention_mask=a)
        SCREAMING_SNAKE_CASE = model(a)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def SCREAMING_SNAKE_CASE__(self, a, a, a, a, a, a, a, a, a) -> str:
        # Intent: create_and_check_model_as_decoder — cross-attention variants.
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
        model.to(a)
        model.eval()
        SCREAMING_SNAKE_CASE = model(
            a,
            attention_mask=a,
            encoder_hidden_states=a,
            encoder_attention_mask=a,
        )
        SCREAMING_SNAKE_CASE = model(
            a,
            attention_mask=a,
            encoder_hidden_states=a,
        )
        SCREAMING_SNAKE_CASE = model(a, attention_mask=a)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def SCREAMING_SNAKE_CASE__(self, a, a, a, a, a, a, a, a, a) -> int:
        # Intent: create_and_check_for_causal_lm — logits shape check.
        SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
        model.to(a)
        model.eval()
        SCREAMING_SNAKE_CASE = model(a, attention_mask=a, labels=a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def SCREAMING_SNAKE_CASE__(self, a, a, a, a, a, a, a, a, a) -> str:
        # Intent: create_and_check_decoder_model_past_large_inputs — verify the
        # KV-cache path matches the no-cache path on a random slice.
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
        model.to(a)
        model.eval()
        # first forward pass
        SCREAMING_SNAKE_CASE = model(
            a,
            attention_mask=a,
            encoder_hidden_states=a,
            encoder_attention_mask=a,
            use_cache=a,
        )
        SCREAMING_SNAKE_CASE = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), config.vocab_size)
        SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1)
        SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask], dim=-1)
        SCREAMING_SNAKE_CASE = model(
            a,
            attention_mask=a,
            encoder_hidden_states=a,
            encoder_attention_mask=a,
            output_hidden_states=a,
        )['hidden_states'][0]
        SCREAMING_SNAKE_CASE = model(
            a,
            attention_mask=a,
            encoder_hidden_states=a,
            encoder_attention_mask=a,
            past_key_values=a,
            output_hidden_states=a,
        )['hidden_states'][0]
        # select random slice
        SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1]).item()
        SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
        SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(a, a, atol=1E-3))

    def SCREAMING_SNAKE_CASE__(self) -> str:
        # Intent: prepare_config_and_inputs_for_common.
        SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        # NOTE(review): unpacking into seven identical names keeps only the
        # last element; originally these were config/input_ids/... etc.
        (
            (SCREAMING_SNAKE_CASE),
            (SCREAMING_SNAKE_CASE),
            (SCREAMING_SNAKE_CASE),
            (SCREAMING_SNAKE_CASE),
            (SCREAMING_SNAKE_CASE),
            (SCREAMING_SNAKE_CASE),
            (SCREAMING_SNAKE_CASE),
        ) = config_and_inputs
        SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class _snake_case(A__, A__, A__, unittest.TestCase):
    # NOTE(review): the bases ``A__`` are undefined — presumably
    # (ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin); the
    # ``_lowercase`` attributes were presumably all_model_classes /
    # all_generative_model_classes / pipeline_model_mapping / is_encoder_decoder
    # flags — TODO confirm against upstream.
    _lowercase = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    _lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    _lowercase = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowercase = False
    _lowercase = False

    def SCREAMING_SNAKE_CASE__(self):
        # Intent: setUp — build the tester and config tester.
        SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self)
        SCREAMING_SNAKE_CASE = ConfigTester(self, config_class=a, hidden_size=37)

    def SCREAMING_SNAKE_CASE__(self) -> str:
        # Intent: test_config.
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__(self):
        # Intent: test_model.
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a)

    def SCREAMING_SNAKE_CASE__(self):
        # Intent: test_model_various_embeddings — rerun per position-embedding type.
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE = type
            self.model_tester.create_and_check_model(*a)

    def SCREAMING_SNAKE_CASE__(self):
        # Intent: sequence-classification test (regression-style labels).
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE = 3
        SCREAMING_SNAKE_CASE = input_dict['input_ids']
        SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
        SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
        model.to(a)
        model.eval()
        SCREAMING_SNAKE_CASE = model(a, attention_mask=a, labels=a)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def SCREAMING_SNAKE_CASE__(self):
        # Intent: single-label classification test.
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE = 3
        SCREAMING_SNAKE_CASE = 'single_label_classification'
        SCREAMING_SNAKE_CASE = input_dict['input_ids']
        SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
        SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
        model.to(a)
        model.eval()
        SCREAMING_SNAKE_CASE = model(a, attention_mask=a, labels=a)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def SCREAMING_SNAKE_CASE__(self):
        # Intent: multi-label classification test (float multi-hot labels).
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE = 3
        SCREAMING_SNAKE_CASE = 'multi_label_classification'
        SCREAMING_SNAKE_CASE = input_dict['input_ids']
        SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
        SCREAMING_SNAKE_CASE = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
        model.to(a)
        model.eval()
        SCREAMING_SNAKE_CASE = model(a, attention_mask=a, labels=a)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
    def SCREAMING_SNAKE_CASE__(self):
        # Skipped upstream test (original name lost to obfuscation).
        pass

    @parameterized.expand([('linear',), ('dynamic',)])
    def SCREAMING_SNAKE_CASE__(self, a):
        # Intent: RoPE-scaling test — scaled model must differ from the
        # unscaled one (except dynamic scaling on short inputs).
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE = ids_tensor([1, 10], config.vocab_size)
        SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
        original_model.to(a)
        original_model.eval()
        SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
        SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
        SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
        scaled_model.to(a)
        scaled_model.eval()
        SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
        SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(a, a, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(a, a, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(a, a, atol=1E-5))
73
1
from math import pi


def lowerCamelCase__ (angle: float, radius: float) -> float:
    """Return the length of a circular arc.

    :param angle: central angle of the arc in degrees.
    :param radius: radius of the circle.

    Uses circumference * (angle / 360).  (The obfuscated original repeated
    the parameter name ``_UpperCAmelCase`` — a SyntaxError — while the body
    used the names restored here.)

    >>> lowerCamelCase__(45, 5)
    3.9269908169872414
    """
    return 2 * pi * radius * (angle / 360)


# Name used by the __main__ demo below.
arc_length = lowerCamelCase__


if __name__ == "__main__":
    print(arc_length(90, 10))
73
from __future__ import annotations

# Collected solutions; each entry is appended by solve().  (The obfuscated
# original bound this to ``a_`` while the code below referenced ``solution``.)
solution = []


def is_safe(board: list, row: int, column: int) -> bool:
    """Return True if a queen can be placed at (row, column) without attack.

    Checks the row, the column, and both upward diagonals (rows below
    ``row`` are still empty during the row-by-row search).
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list, row: int) -> bool:
    """Backtracking search: place one queen per row, recording full placements.

    NOTE: mirrors the upstream algorithm — the same (mutated) board object is
    appended each time a placement completes; ``len(solution)`` is the count.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
73
1
"""Unit tests for transformers' backbone utility helpers.

NOTE(review): this file appears machine-obfuscated.  Every local is bound to
``SCREAMING_SNAKE_CASE`` (so expected values are never actually compared
against the returned pair), all three test methods share the name
``SCREAMING_SNAKE_CASE__`` (only the last survives under unittest), and the
bare ``a`` used as an argument below is undefined — presumably ``None`` in
the original.  Invalid return annotations (``Optional``/``Any``/``List``
were never imported) have been replaced with ``-> None``; nothing else was
changed.
"""
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class _snake_case(unittest.TestCase):
    def SCREAMING_SNAKE_CASE__(self) -> None:
        # Intent: test_get_aligned_output_features_output_indices.
        SCREAMING_SNAKE_CASE = ['a', 'b', 'c']
        # Defaults to last layer if both are None
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a, a, a)
        self.assertEqual(a, ['c'])
        self.assertEqual(a, [2])
        # Out indices set to match out features
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(['a', 'c'], a, a)
        self.assertEqual(a, ['a', 'c'])
        self.assertEqual(a, [0, 2])
        # Out features set to match out indices
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a, [0, 2], a)
        self.assertEqual(a, ['a', 'c'])
        self.assertEqual(a, [0, 2])
        # Out features selected from negative indices
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a, [-3, -1], a)
        self.assertEqual(a, ['a', 'c'])
        self.assertEqual(a, [-3, -1])

    def SCREAMING_SNAKE_CASE__(self) -> int:
        # Intent: test_verify_out_features_out_indices — each block asserts a
        # specific validation failure.
        # Stage names must be set
        with self.assertRaises(a):
            verify_out_features_out_indices(['a', 'b'], (0, 1), a)
        # Out features must be a list
        with self.assertRaises(a):
            verify_out_features_out_indices(('a', 'b'), (0, 1), ['a', 'b'])
        # Out features must be a subset of stage names
        with self.assertRaises(a):
            verify_out_features_out_indices(['a', 'b'], (0, 1), ['a'])
        # Out indices must be a list or tuple
        with self.assertRaises(a):
            verify_out_features_out_indices(a, 0, ['a', 'b'])
        # Out indices must be a subset of stage names
        with self.assertRaises(a):
            verify_out_features_out_indices(a, (0, 1), ['a'])
        # Out features and out indices must be the same length
        with self.assertRaises(a):
            verify_out_features_out_indices(['a', 'b'], (0,), ['a', 'b', 'c'])
        # Out features should match out indices
        with self.assertRaises(a):
            verify_out_features_out_indices(['a', 'b'], (0, 2), ['a', 'b', 'c'])
        # Out features and out indices should be in order
        with self.assertRaises(a):
            verify_out_features_out_indices(['b', 'a'], (0, 1), ['a', 'b'])
        # Check passes with valid inputs
        verify_out_features_out_indices(['a', 'b', 'd'], (0, 1, -1), ['a', 'b', 'c', 'd'])

    def SCREAMING_SNAKE_CASE__(self) -> None:
        # Intent: test_backbone_mixin — property setters keep features/indices
        # in sync.  NOTE(review): the bare SCREAMING_SNAKE_CASE assignments
        # were presumably ``backbone.stage_names`` / ``backbone._out_features``
        # / ``backbone._out_indices`` etc. originally.
        SCREAMING_SNAKE_CASE = BackboneMixin()
        SCREAMING_SNAKE_CASE = ['a', 'b', 'c']
        SCREAMING_SNAKE_CASE = ['a', 'c']
        SCREAMING_SNAKE_CASE = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        SCREAMING_SNAKE_CASE = ['a', 'b']
        self.assertEqual(backbone.out_features, ['a', 'b'])
        self.assertEqual(backbone.out_indices, [0, 1])
        SCREAMING_SNAKE_CASE = [-3, -1]
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [-3, -1])
73
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# NOTE(review): this chunk carries obfuscation damage that is reproduced
# unchanged below (only comments/annotations were added):
#   * the base-class name `A__` and the argument name `a` used throughout the
#     methods are never defined (presumably the tester mixins / original
#     argument values — confirm against upstream diffusers tests);
#   * every class attribute is named `_lowercase`, so earlier assignments are
#     shadowed by later ones;
#   * several method signatures declare the parameter `a` twice (SyntaxError);
#   * results are bound to `SCREAMING_SNAKE_CASE` but later read back through
#     their original names (`unet`, `pipe`, `mask`, ...), which are undefined.
class _snake_case(A__, A__, unittest.TestCase):
    """Fast (tiny-model) tests for StableDiffusionDiffEditPipeline."""

    _lowercase = StableDiffusionDiffEditPipeline
    _lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    _lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    _lowercase = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _lowercase = frozenset([])

    def SCREAMING_SNAKE_CASE__(self) -> dict:
        """Build tiny dummy pipeline components (UNet, DDIM schedulers, VAE, CLIP text stack)."""
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=a,  # NOTE(review): `a` undefined — confirm original flag value
        )
        SCREAMING_SNAKE_CASE = DDIMScheduler(
            beta_start=0.0_00_85,
            beta_end=0.0_12,
            beta_schedule='scaled_linear',
            clip_sample=a,
            set_alpha_to_one=a,
        )
        SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
            beta_start=0.0_00_85,
            beta_end=0.0_12,
            beta_schedule='scaled_linear',
            clip_sample=a,
            set_alpha_to_zero=a,
        )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='gelu',
            projection_dim=512,
        )
        SCREAMING_SNAKE_CASE = CLIPTextModel(a)
        SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        SCREAMING_SNAKE_CASE = {
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    # NOTE(review): duplicate parameter name `a` — original was likely (self, device, seed=0).
    def SCREAMING_SNAKE_CASE__(self, a, a=0) -> dict:
        """Dummy call kwargs for the main pipeline entry point."""
        SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16), rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(a)).to(a)
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = {
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE__(self, a, a=0) -> dict:
        """Dummy call kwargs for `generate_mask`."""
        SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32), rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = image.cpu().permute(0, 2, 3, 1)[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = {
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE__(self, a, a=0) -> dict:
        """Dummy call kwargs for `invert` (DDIM latent inversion)."""
        SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32), rng=random.Random(a)).to(a)
        SCREAMING_SNAKE_CASE = image.cpu().permute(0, 2, 3, 1)[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
        if str(a).startswith('mps'):
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = {
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """Optional components set to None must survive a save/load round-trip."""
        if not hasattr(self.pipeline_class, '_optional_components'):
            return
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(a, a, a)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
        SCREAMING_SNAKE_CASE = pipe(**a)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a)
            SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a)
            pipe_loaded.to(a)
            pipe_loaded.set_progress_bar_config(disable=a)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(a, a) is None,
                f'''`{optional_component}` did not stay set to None after loading.''',
            )

        SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
        SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0]

        SCREAMING_SNAKE_CASE = np.abs(output - output_loaded).max()
        self.assertLess(a, 1E-4)

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """generate_mask on the tiny inputs returns an all-zero (1, 16, 16) mask."""
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.generate_mask(**a)
        SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        SCREAMING_SNAKE_CASE = np.array([0] * 9)
        SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a, 1E-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """invert() with the default DDIM inverse scheduler matches the recorded slice."""
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.invert(**a).images
        SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        SCREAMING_SNAKE_CASE = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99],
        )
        SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a, 1E-3)

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """Batched vs single inference must agree within a loose tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=5E-3)

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """invert() with the DPM-Solver multistep scheduler pair matches the recorded slice."""
        SCREAMING_SNAKE_CASE = 'cpu'
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
        SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a)
        SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
        SCREAMING_SNAKE_CASE = pipe.invert(**a).images
        SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        SCREAMING_SNAKE_CASE = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99],
        )
        SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(a, 1E-3)


@require_torch_gpu
@slow
class _snake_case(unittest.TestCase):
    """Slow GPU integration tests for StableDiffusionDiffEditPipeline (full SD 2.1)."""

    def SCREAMING_SNAKE_CASE__(self) -> None:
        # Free GPU memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def SCREAMING_SNAKE_CASE__(cls) -> None:
        # Download and cache the shared input image once for the class.
        SCREAMING_SNAKE_CASE = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
        SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768))
        SCREAMING_SNAKE_CASE = raw_image

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """End-to-end DiffEdit (DDIM schedulers) against the reference pears image."""
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=a, torch_dtype=torch.floataa)
        SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config)
        SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a)

        SCREAMING_SNAKE_CASE = 'a bowl of fruit'
        SCREAMING_SNAKE_CASE = 'a bowl of pears'

        SCREAMING_SNAKE_CASE = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=a,
            target_prompt=a,
            generator=a,
        )
        SCREAMING_SNAKE_CASE = pipe.invert(
            prompt=a, image=self.raw_image, inpaint_strength=0.7, generator=a).latents
        SCREAMING_SNAKE_CASE = pipe(
            prompt=a,
            mask_image=a,
            image_latents=a,
            generator=a,
            negative_prompt=a,
            inpaint_strength=0.7,
            output_type='numpy',
        ).images[0]

        SCREAMING_SNAKE_CASE = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png').resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5E-1

    def SCREAMING_SNAKE_CASE__(self) -> None:
        """End-to-end DiffEdit with the DPM-Solver multistep scheduler pair."""
        SCREAMING_SNAKE_CASE = torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=a, torch_dtype=torch.floataa)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a)

        SCREAMING_SNAKE_CASE = 'a bowl of fruit'
        SCREAMING_SNAKE_CASE = 'a bowl of pears'

        SCREAMING_SNAKE_CASE = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=a,
            target_prompt=a,
            generator=a,
        )
        SCREAMING_SNAKE_CASE = pipe.invert(
            prompt=a,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=a,
            num_inference_steps=25,
        ).latents
        SCREAMING_SNAKE_CASE = pipe(
            prompt=a,
            mask_image=a,
            image_latents=a,
            generator=a,
            negative_prompt=a,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type='numpy',
        ).images[0]

        SCREAMING_SNAKE_CASE = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png').resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5E-1
73
1
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : str = logging.get_logger(__name__)

# Map of canonical BLIP checkpoint names to their hosted config files.
a_ : dict = {
    'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
    'Salesforce/blip-vqa-capfit-large': (
        'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
    ),
    'Salesforce/blip-image-captioning-base': (
        'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
    ),
    'Salesforce/blip-image-captioning-large': (
        'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
    ),
    'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
    'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
    'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
    'Salesforce/blip-itm-large-flikr': (
        'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
    ),
}


# NOTE(review): obfuscation damage reproduced unchanged in this chunk (only
# comments/annotations were added): the base class `A__` is undefined
# (presumably PretrainedConfig), the `__init__` signatures repeat the
# parameter name `a` (SyntaxError; the intended names are visible on the
# right-hand sides of the assignments below), and assignment targets were
# collapsed to `SCREAMING_SNAKE_CASE` while later code reads the original
# attribute/local names.
class _snake_case(A__):
    """BLIP text-model configuration."""

    # model_type identifier used by the config machinery
    _lowercase : str = '''blip_text_model'''

    def __init__(self, a=3_0524, a=768, a=768, a=3072, a=768, a=12, a=8, a=512, a="gelu", a=1E-12, a=0.0, a=0.0, a=0.02, a=3_0522, a=2, a=0, a=102, a=True, a=True, **a, ) -> None:
        super().__init__(
            pad_token_id=a,
            bos_token_id=a,
            eos_token_id=a,
            sep_token_id=a,
            **a,
        )
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = encoder_hidden_size
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = projection_dim
        SCREAMING_SNAKE_CASE = hidden_dropout_prob
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = max_position_embeddings
        SCREAMING_SNAKE_CASE = layer_norm_eps
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE = is_decoder
        SCREAMING_SNAKE_CASE = use_cache

    @classmethod
    def SCREAMING_SNAKE_CASE__(cls, a, **a) -> "PretrainedConfig":
        """Load this config from a pretrained location, unwrapping a composite BlipConfig if needed."""
        cls._set_token_in_kwargs(a)

        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(a, **a)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type') == "blip":
            SCREAMING_SNAKE_CASE = config_dict['text_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')

        return cls.from_dict(a, **a)


class _snake_case(A__):
    """BLIP vision-model configuration."""

    _lowercase : str = '''blip_vision_model'''

    def __init__(self, a=768, a=3072, a=512, a=12, a=12, a=384, a=16, a="gelu", a=1E-5, a=0.0, a=1E-10, **a, ) -> None:
        super().__init__(**a)

        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = projection_dim
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = patch_size
        SCREAMING_SNAKE_CASE = image_size
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = attention_dropout
        SCREAMING_SNAKE_CASE = layer_norm_eps
        SCREAMING_SNAKE_CASE = hidden_act

    @classmethod
    def SCREAMING_SNAKE_CASE__(cls, a, **a) -> "PretrainedConfig":
        """Load this config from a pretrained location, unwrapping a composite BlipConfig if needed."""
        cls._set_token_in_kwargs(a)

        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(a, **a)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('model_type') == "blip":
            SCREAMING_SNAKE_CASE = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')

        return cls.from_dict(a, **a)


class _snake_case(A__):
    """Composite BLIP configuration holding a text and a vision sub-config."""

    _lowercase : str = '''blip'''
    _lowercase : bool = True  # presumably is_composition — confirm against upstream

    def __init__(self, a=None, a=None, a=512, a=2.65_92, a=256, **a, ) -> None:
        super().__init__(**a)

        if text_config is None:
            SCREAMING_SNAKE_CASE = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')

        if vision_config is None:
            SCREAMING_SNAKE_CASE = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')

        # NOTE(review): `BlipTextConfig` / `BlipVisionConfig` are the original
        # class names; the classes above were renamed to `_snake_case` by the
        # obfuscation, so these references are unresolved here.
        SCREAMING_SNAKE_CASE = BlipTextConfig(**a)
        SCREAMING_SNAKE_CASE = BlipVisionConfig(**a)

        SCREAMING_SNAKE_CASE = self.vision_config.hidden_size
        SCREAMING_SNAKE_CASE = projection_dim
        SCREAMING_SNAKE_CASE = logit_scale_init_value
        SCREAMING_SNAKE_CASE = 1.0
        SCREAMING_SNAKE_CASE = 0.02
        SCREAMING_SNAKE_CASE = image_text_hidden_size

    @classmethod
    def SCREAMING_SNAKE_CASE__(cls, a, a, **a) -> "PretrainedConfig":
        """Build a composite config from already-instantiated text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a)

    def SCREAMING_SNAKE_CASE__(self) -> dict:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
        SCREAMING_SNAKE_CASE = self.text_config.to_dict()
        SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
        SCREAMING_SNAKE_CASE = self.__class__.model_type
        return output
73
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : object = logging.get_logger(__name__)

# Map of canonical UniSpeech checkpoint names to their hosted config files.
a_ : dict = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


# NOTE(review): obfuscation damage reproduced unchanged (only comments and
# annotations were added): the base class `A__` is undefined (presumably
# PretrainedConfig), `__init__` repeats the parameter name `a` (SyntaxError;
# the intended parameter names are visible on the right-hand sides below),
# and every assignment target was collapsed to `SCREAMING_SNAKE_CASE`.
class _snake_case(A__):
    """Configuration class for UniSpeech speech models."""

    _lowercase : str = '''unispeech'''

    def __init__(self, a=32, a=768, a=12, a=12, a=3072, a="gelu", a=0.1, a=0.1, a=0.1, a=0.0, a=0.0, a=0.1, a=0.1, a=0.02, a=1E-5, a="group", a="gelu", a=(512, 512, 512, 512, 512, 512, 512), a=(5, 2, 2, 2, 2, 2, 2), a=(10, 3, 3, 3, 3, 2, 2), a=False, a=128, a=16, a=False, a=True, a=0.05, a=10, a=2, a=0.0, a=10, a=0, a=320, a=2, a=0.1, a=100, a=256, a=256, a=0.1, a="mean", a=False, a=False, a=256, a=80, a=0, a=1, a=2, a=0.5, **a, ) -> None:
        super().__init__(**a, pad_token_id=a, bos_token_id=a, eos_token_id=a)
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = feat_extract_norm
        SCREAMING_SNAKE_CASE = feat_extract_activation
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = conv_bias
        SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
        SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
        SCREAMING_SNAKE_CASE = len(self.conv_dim)
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = hidden_dropout
        SCREAMING_SNAKE_CASE = attention_dropout
        SCREAMING_SNAKE_CASE = activation_dropout
        SCREAMING_SNAKE_CASE = feat_proj_dropout
        SCREAMING_SNAKE_CASE = final_dropout
        SCREAMING_SNAKE_CASE = layerdrop
        SCREAMING_SNAKE_CASE = layer_norm_eps
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = num_ctc_classes
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = do_stable_layer_norm
        SCREAMING_SNAKE_CASE = use_weighted_layer_sum
        SCREAMING_SNAKE_CASE = classifier_proj_size

        # The three conv-layer descriptions must agree in length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        SCREAMING_SNAKE_CASE = apply_spec_augment
        SCREAMING_SNAKE_CASE = mask_time_prob
        SCREAMING_SNAKE_CASE = mask_time_length
        SCREAMING_SNAKE_CASE = mask_time_min_masks
        SCREAMING_SNAKE_CASE = mask_feature_prob
        SCREAMING_SNAKE_CASE = mask_feature_length
        SCREAMING_SNAKE_CASE = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        SCREAMING_SNAKE_CASE = num_codevectors_per_group
        SCREAMING_SNAKE_CASE = num_codevector_groups
        SCREAMING_SNAKE_CASE = contrastive_logits_temperature
        SCREAMING_SNAKE_CASE = feat_quantizer_dropout
        SCREAMING_SNAKE_CASE = num_negatives
        SCREAMING_SNAKE_CASE = codevector_dim
        SCREAMING_SNAKE_CASE = proj_codevector_dim
        SCREAMING_SNAKE_CASE = diversity_loss_weight

        # ctc loss
        SCREAMING_SNAKE_CASE = ctc_loss_reduction
        SCREAMING_SNAKE_CASE = ctc_zero_infinity

        # pretraining loss
        SCREAMING_SNAKE_CASE = replace_prob

    @property
    def SCREAMING_SNAKE_CASE__(self) -> int:
        # Total downsampling factor of the conv feature extractor
        # (product of all conv strides).
        return functools.reduce(operator.mul, self.conv_stride, 1)
73
1
from __future__ import annotations

# Below this window size the search falls back to a linear scan.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linearly scan ``array[left:right]`` (right exclusive).

    Returns the index of ``target`` or -1 if it is not present.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted ``array``.

    Returns the index of ``target`` or -1 if it is not present.
    Fixes over the obfuscated original: the right bound is ``len(array) - 1``
    (not ``len(array)``) and the split points are computed relative to
    ``left`` — the old ``2 * (left + right) // 3 + 1`` overshoots the window
    (and the array) whenever ``left > 0``.
    """
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # Inclusive window -> pass right + 1 to the half-open scan.
            return lin_search(left, right + 1, array, target)

        # Split the inclusive window [left, right] into three parts.
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on the inclusive window ``array[left..right]``.

    Returns the index of ``target`` or -1 if it is not present.
    """
    if left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    # Run both variants and report; the obfuscated original bound both
    # results to the same name, so the recursive result was never printed.
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"""Iterative search: {target} found at positions: {resulta}""")
        print(f"""Recursive search: {target} found at positions: {resultb}""")
    else:
        print('Not found')
73
"""Official evaluation script for SQuAD version 2.0.

Restored from the obfuscated original, in which every function was named
``lowerCamelCase__`` (while call sites used the real names), multi-argument
functions repeated the parameter ``_UpperCAmelCase`` (a SyntaxError), and the
module globals ``ARTICLES_REGEX`` / ``OPTS`` had been collapsed to ``a_``.
The canonical names survive inside the function bodies and drive this
reconstruction.
"""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

# Matches English articles for answer normalization.
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)

# Parsed CLI options; populated in the __main__ block.
OPTS = None


def parse_args():
    """Parse command-line arguments for the evaluation script."""
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh',
        '-t',
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    """Map each question id to whether it has at least one gold answer text."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    """Whitespace-tokenize the normalized answer; empty/None input yields []."""
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_fa(a_gold, a_pred):
    """Token-level F1 score between a gold and a predicted answer."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def get_raw_scores(dataset, preds):
    """Compute exact-match and F1 scores for every question id in the dataset."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out answers predicted 'no answer' above the threshold; credit true no-answers."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into exact/f1/total, optionally on a qid subset."""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 1_00.0 * sum(exact_scores.values()) / total),
                ('f1', 1_00.0 * sum(fa_scores.values()) / total),
                ('total', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list) / total),
                ('total', total),
            ])


def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of new_eval into main_eval under '<prefix>_<key>'."""
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a step-style precision-recall curve to out_image."""
    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer probability as a threshold and compute average precision."""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 1_00.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute AP for exact, F1 and oracle scoring; save curves and merge into main_eval."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_exact.png'),
        title='Precision-Recall curve for Exact Match score',
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_f1.png'),
        title='Precision-Recall curve for F1 score',
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
        title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)',
    )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_fa, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question subset."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold maximizing the aggregate score."""
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 1_00.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best achievable exact/F1 scores and their thresholds in main_eval."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_fa
    main_eval['best_f1_thresh'] = fa_thresh


def main():
    """Load data and predictions, score them and emit the evaluation dict."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # matplotlib is only needed (and imported) when plots are requested;
        # the Agg backend avoids requiring a display.
        import matplotlib

        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    main()
73
1
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Dict = logging.get_logger(__name__) a_ : Union[str, Any] = { 'edbeeching/decision-transformer-gym-hopper-medium': ( 'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class _snake_case ( A__ ): _lowercase : Optional[Any] = '''decision_transformer''' _lowercase : str = ['''past_key_values'''] _lowercase : Union[str, Any] = { '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , a=17 , a=4 , a=128 , a=4096 , a=True , a=1 , a=1024 , a=3 , a=1 , a=None , a="relu" , a=0.1 , a=0.1 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=True , a=5_0256 , a=5_0256 , a=False , a=False , **a , ) -> List[str]: SCREAMING_SNAKE_CASE = state_dim SCREAMING_SNAKE_CASE = act_dim SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = max_ep_len SCREAMING_SNAKE_CASE = action_tanh SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = n_positions SCREAMING_SNAKE_CASE = n_layer SCREAMING_SNAKE_CASE = n_head SCREAMING_SNAKE_CASE = n_inner SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_pdrop SCREAMING_SNAKE_CASE = embd_pdrop SCREAMING_SNAKE_CASE = attn_pdrop SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = scale_attn_weights SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE = reorder_and_upcast_attn SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id super().__init__(bos_token_id=a , eos_token_id=a , **a)
73
import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ : Dict = logging.get_logger(__name__) class _snake_case ( A__ ): def __init__( self , *a , **a) -> None: warnings.warn( 'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use GLPNImageProcessor instead.' , a , ) super().__init__(*a , **a)
73
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Union[str, Any] = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class _snake_case ( A__ ): _lowercase : Optional[int] = '''levit''' def __init__( self , a=224 , a=3 , a=3 , a=2 , a=1 , a=16 , a=[128, 256, 384] , a=[4, 8, 12] , a=[4, 4, 4] , a=[16, 16, 16] , a=0 , a=[2, 2, 2] , a=[2, 2, 2] , a=0.02 , **a , ) -> List[Any]: super().__init__(**a) SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = kernel_size SCREAMING_SNAKE_CASE = stride SCREAMING_SNAKE_CASE = padding SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = key_dim SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = patch_size SCREAMING_SNAKE_CASE = attention_ratio SCREAMING_SNAKE_CASE = mlp_ratio SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class _snake_case ( A__ ): _lowercase : int = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def SCREAMING_SNAKE_CASE__ ( self) -> float: return 1E-4
73
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _snake_case ( unittest.TestCase , A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = load_tool('text-classification') self.tool.setup() SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=a) def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative']) self.assertEqual(a , 'positive') def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative']) self.assertEqual(a , 'positive') def SCREAMING_SNAKE_CASE__ ( self) -> int: SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative']) self.assertEqual(a , 'positive') def SCREAMING_SNAKE_CASE__ ( self) -> List[str]: SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative']) self.assertEqual(a , 'positive')
73
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging a_ : Any = logging.get_logger(__name__) a_ : Any = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _snake_case ( A__ ): _lowercase : Any = '''gpt_neo''' _lowercase : int = ['''past_key_values'''] _lowercase : Tuple = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , a=5_0257 , a=2048 , a=2048 , a=24 , a=[[["global", "local"], 12]] , a=16 , a=None , a=256 , a="gelu_new" , a=0.0 , a=0.0 , a=0.0 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=5_0256 , a=5_0256 , **a , ) -> Tuple: SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_layers SCREAMING_SNAKE_CASE = num_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = window_size SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = resid_dropout SCREAMING_SNAKE_CASE = embed_dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = layer_norm_epsilon SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = use_cache SCREAMING_SNAKE_CASE = bos_token_id SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = attention_types SCREAMING_SNAKE_CASE = self.expand_attention_types_params(a) if len(self.attention_layers) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. 
' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.') super().__init__(bos_token_id=a , eos_token_id=a , **a) @staticmethod def SCREAMING_SNAKE_CASE__ ( a) -> Any: SCREAMING_SNAKE_CASE = [] for item in attention_types: for _ in range(item[1]): attentions.extend(item[0]) return attentions def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): import torch SCREAMING_SNAKE_CASE = input.size() SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) SCREAMING_SNAKE_CASE = shape[dimension] SCREAMING_SNAKE_CASE = torch.arange(0 , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = torch.div(sizedim - size , _UpperCAmelCase , rounding_mode='floor') + 1 SCREAMING_SNAKE_CASE = torch.arange(_UpperCAmelCase) + low_indices[:min_length][:, None] SCREAMING_SNAKE_CASE = [slice(_UpperCAmelCase)] * rank SCREAMING_SNAKE_CASE = indices SCREAMING_SNAKE_CASE = input[s] SCREAMING_SNAKE_CASE = list(range(0 , rank + 1)) perm.append(perm.pop(dimension + 1)) return sliced.permute(_UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): import torch SCREAMING_SNAKE_CASE = torch.arange(1 , _UpperCAmelCase) SCREAMING_SNAKE_CASE = torch.remainder(_UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE = remainders == 0 SCREAMING_SNAKE_CASE = candidates[divisor_indices] SCREAMING_SNAKE_CASE = torch.max(_UpperCAmelCase) return largest_divisor, torch.div(_UpperCAmelCase , _UpperCAmelCase , rounding_mode='floor') class _snake_case ( A__ ): @property def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]: SCREAMING_SNAKE_CASE = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}}) if self.use_past: self.fill_with_past_key_values_(a , 
direction='inputs') SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'past_sequence + sequence'} else: SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'} return common_inputs @property def SCREAMING_SNAKE_CASE__ ( self) -> int: return self._config.num_heads def SCREAMING_SNAKE_CASE__ ( self , a , a = -1 , a = -1 , a = False , a = None , ) -> Mapping[str, Any]: SCREAMING_SNAKE_CASE = super(a , self).generate_dummy_inputs( a , batch_size=a , seq_length=a , is_pair=a , framework=a) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE = OrderedDict({'input_ids': common_inputs['input_ids']}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs['input_ids'].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE = seqlen + 2 SCREAMING_SNAKE_CASE = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE = [ (torch.zeros(a), torch.zeros(a)) for _ in range(self.num_layers) ] SCREAMING_SNAKE_CASE = common_inputs['attention_mask'] if self.use_past: SCREAMING_SNAKE_CASE = ordered_inputs['attention_mask'].dtype SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs['attention_mask'], torch.ones(a , a , dtype=a)] , dim=1) return ordered_inputs @property def SCREAMING_SNAKE_CASE__ ( self) -> int: return 13
73
import sys import turtle def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): my_pen.up() my_pen.goto(vertexa[0] , vertexa[1]) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1]) my_pen.goto(vertexa[0] , vertexa[1]) my_pen.goto(vertexa[0] , vertexa[1]) if depth == 0: return triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1) triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1) triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( 'Correct format for using this script: ' 'python fractals.py <int:depth_for_fractal>' ) a_ : Any = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('red') a_ : str = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
73
1
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) a_ : Any = logging.getLogger() def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , 'all_results.json') if os.path.exists(_UpperCAmelCase): with open(_UpperCAmelCase , 'r') as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) else: raise ValueError(F'''can\'t find {path}''') return results a_ : Union[str, Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: import xla_spawn SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE = f''' ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(a , 'argv' , a): SCREAMING_SNAKE_CASE = time() xla_spawn.main() SCREAMING_SNAKE_CASE = time() SCREAMING_SNAKE_CASE = get_results(a) self.assertGreaterEqual(result['eval_accuracy'] , 0.75) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: import xla_spawn SCREAMING_SNAKE_CASE = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(a , 'argv' , a): xla_spawn.main()
73
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed a_ : Any = 'true' def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16): set_seed(42) SCREAMING_SNAKE_CASE = RegressionModel() SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase) SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase) SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase) model.to(accelerator.device) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase) return model, ddp_model, dataloader def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False): SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased') SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation') def tokenize_function(_UpperCAmelCase): SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase) return outputs with accelerator.main_process_first(): SCREAMING_SNAKE_CASE = dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels') def collate_fn(_UpperCAmelCase): if use_longest: return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt') return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt') return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16) def lowerCamelCase__ (_UpperCAmelCase , 
_UpperCAmelCase): SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase) SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches) SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] for batch in dataloader: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values() with torch.no_grad(): SCREAMING_SNAKE_CASE = model(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], [] for logit, targ in logits_and_targets: logits.append(_UpperCAmelCase) targs.append(_UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase) return logits, targs def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) assert ( len(_UpperCAmelCase) == num_samples ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}''' def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False): SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc') SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase) # First do 
baseline SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no'] model.to(_UpperCAmelCase) model.eval() for batch in dataloader: batch.to(_UpperCAmelCase) with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels']) SCREAMING_SNAKE_CASE = metric.compute() # Then do distributed SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1) SCREAMING_SNAKE_CASE = batch['labels'] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase) SCREAMING_SNAKE_CASE = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''') test_mrpc(_UpperCAmelCase , _UpperCAmelCase) 
accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**') for split_batches in [True, False]: for dispatch_batches in [True, False]: SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase) if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''') test_torch_metrics(_UpperCAmelCase , 99) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**') SCREAMING_SNAKE_CASE = Accelerator() test_torch_metrics(_UpperCAmelCase , 512) accelerator.state._reset_state() def lowerCamelCase__ (_UpperCAmelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
73
1
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss a_ : Optional[Any] = pytest.mark.integration @require_faiss class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> str: SCREAMING_SNAKE_CASE = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(a) for x in np.arange(30).tolist()]}) return dset def SCREAMING_SNAKE_CASE__ ( self) -> int: import faiss SCREAMING_SNAKE_CASE = self._create_dummy_dataset() SCREAMING_SNAKE_CASE = dset.map( lambda a , a: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=a , keep_in_memory=a) SCREAMING_SNAKE_CASE = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') dset.drop_index('vecs') def SCREAMING_SNAKE_CASE__ ( self) -> Any: import faiss SCREAMING_SNAKE_CASE = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: import faiss SCREAMING_SNAKE_CASE = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... 
but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=a) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name) dset.load_faiss_index('vecs2' , tmp_file.name) os.unlink(tmp_file.name) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1 , 1) , index_name='vecs') dset.drop_index('vecs') self.assertRaises(a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa))) def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]: from elasticsearch import Elasticsearch SCREAMING_SNAKE_CASE = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: SCREAMING_SNAKE_CASE = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30) SCREAMING_SNAKE_CASE = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} SCREAMING_SNAKE_CASE = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=a) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dset.get_nearest_examples('filename' , 'my_name-train_29') self.assertEqual(examples['filename'][0] , 'my_name-train_29') @require_faiss class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> str: import faiss SCREAMING_SNAKE_CASE = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa)) 
self.assertIsNotNone(index.faiss_index) self.assertEqual(index.faiss_index.ntotal , 5) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa)) self.assertEqual(index.faiss_index.ntotal , 10) # single query SCREAMING_SNAKE_CASE = np.zeros(5 , dtype=np.floataa) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = index.search(a) self.assertRaises(a , index.search , query.reshape(-1 , 1)) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) # batched queries SCREAMING_SNAKE_CASE = np.eye(5 , dtype=np.floataa)[::-1] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = index.search_batch(a) self.assertRaises(a , index.search_batch , queries[0]) SCREAMING_SNAKE_CASE = [scores[0] for scores in total_scores] SCREAMING_SNAKE_CASE = [indices[0] for indices in total_indices] self.assertGreater(np.min(a) , 0) self.assertListEqual([4, 3, 2, 1, 0] , a) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: import faiss SCREAMING_SNAKE_CASE = FaissIndex(string_factory='Flat') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) SCREAMING_SNAKE_CASE = FaissIndex(string_factory='LSH') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexLSH) with self.assertRaises(a): SCREAMING_SNAKE_CASE = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5)) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: import faiss SCREAMING_SNAKE_CASE = faiss.IndexFlat(5) SCREAMING_SNAKE_CASE = FaissIndex(custom_index=a) index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: import faiss SCREAMING_SNAKE_CASE = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5 , dtype=np.floataa)) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. 
If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=a) as tmp_file: index.save(tmp_file.name) SCREAMING_SNAKE_CASE = FaissIndex.load(tmp_file.name) os.unlink(tmp_file.name) SCREAMING_SNAKE_CASE = np.zeros(5 , dtype=np.floataa) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = index.search(a) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) @require_faiss def lowerCamelCase__ (_UpperCAmelCase): import faiss SCREAMING_SNAKE_CASE = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5 , dtype=np.floataa)) SCREAMING_SNAKE_CASE = 'index.faiss' SCREAMING_SNAKE_CASE = F'''mock://{index_name}''' index.save(_UpperCAmelCase , storage_options=mockfs.storage_options) SCREAMING_SNAKE_CASE = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options) SCREAMING_SNAKE_CASE = np.zeros(5 , dtype=np.floataa) SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = index.search(_UpperCAmelCase) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: SCREAMING_SNAKE_CASE = Elasticsearch() SCREAMING_SNAKE_CASE = {'acknowledged': True} SCREAMING_SNAKE_CASE = ElasticSearchIndex(es_client=a) mocked_bulk.return_value([(True, None)] * 3) index.add_documents(['foo', 'bar', 'foobar']) # single query SCREAMING_SNAKE_CASE = 'foo' SCREAMING_SNAKE_CASE = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = index.search(a) 
self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # single query with timeout SCREAMING_SNAKE_CASE = 'foo' SCREAMING_SNAKE_CASE = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = index.search(a , request_timeout=30) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # batched queries SCREAMING_SNAKE_CASE = ['foo', 'bar', 'foobar'] SCREAMING_SNAKE_CASE = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = index.search_batch(a) SCREAMING_SNAKE_CASE = [scores[0] for scores in total_scores] SCREAMING_SNAKE_CASE = [indices[0] for indices in total_indices] self.assertGreater(np.min(a) , 0) self.assertListEqual([1, 1, 1] , a) # batched queries with timeout SCREAMING_SNAKE_CASE = ['foo', 'bar', 'foobar'] SCREAMING_SNAKE_CASE = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = index.search_batch(a , request_timeout=30) SCREAMING_SNAKE_CASE = [scores[0] for scores in total_scores] SCREAMING_SNAKE_CASE = [indices[0] for indices in total_indices] self.assertGreater(np.min(a) , 0) self.assertListEqual([1, 1, 1] , a)
73
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available a_ : List[str] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False): SCREAMING_SNAKE_CASE = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''')) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''')) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''')) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''')) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''')) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''')) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''')) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''')) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''')) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''')) # projection layer + position embeddings rename_keys.extend( [ ('module.cls_token', 'vit.embeddings.cls_token'), ('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('module.pos_embed', 
'vit.embeddings.position_embeddings'), ]) if base_model: # layernorm + pooler rename_keys.extend( [ ('module.norm.weight', 'layernorm.weight'), ('module.norm.bias', 'layernorm.bias'), ]) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ]) return rename_keys def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False): for i in range(config.num_hidden_layers): if base_model: SCREAMING_SNAKE_CASE = '' else: SCREAMING_SNAKE_CASE = 'vit.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''') SCREAMING_SNAKE_CASE = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. 
SCREAMING_SNAKE_CASE = [ 'module.fc.fc1.weight', 'module.fc.fc1.bias', 'module.fc.bn1.weight', 'module.fc.bn1.bias', 'module.fc.bn1.running_mean', 'module.fc.bn1.running_var', 'module.fc.bn1.num_batches_tracked', 'module.fc.fc2.weight', 'module.fc.fc2.bias', 'module.fc.bn2.weight', 'module.fc.bn2.bias', 'module.fc.bn2.running_mean', 'module.fc.bn2.running_var', 'module.fc.bn2.num_batches_tracked', 'module.fc.fc3.weight', 'module.fc.fc3.bias', ] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = dct.pop(_UpperCAmelCase) SCREAMING_SNAKE_CASE = val def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = ViTMSNConfig() SCREAMING_SNAKE_CASE = 1000 SCREAMING_SNAKE_CASE = 'datasets/huggingface/label-files' SCREAMING_SNAKE_CASE = 'imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase) , 'r')) SCREAMING_SNAKE_CASE = {int(_UpperCAmelCase): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE = 384 SCREAMING_SNAKE_CASE = 1536 SCREAMING_SNAKE_CASE = 6 elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE = 1024 SCREAMING_SNAKE_CASE = 4096 SCREAMING_SNAKE_CASE = 24 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 0.1 elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE = 4 elif "l7" in checkpoint_url: SCREAMING_SNAKE_CASE = 7 SCREAMING_SNAKE_CASE = 1024 SCREAMING_SNAKE_CASE = 4096 SCREAMING_SNAKE_CASE = 24 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 0.1 SCREAMING_SNAKE_CASE = ViTMSNModel(_UpperCAmelCase) SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu')['target_encoder'] SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size) remove_projection_head(_UpperCAmelCase) SCREAMING_SNAKE_CASE = 
create_rename_keys(_UpperCAmelCase , base_model=_UpperCAmelCase) for src, dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase , base_model=_UpperCAmelCase) model.load_state_dict(_UpperCAmelCase) model.eval() SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg' SCREAMING_SNAKE_CASE = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase).raw) SCREAMING_SNAKE_CASE = ViTImageProcessor( size=config.image_size , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase) SCREAMING_SNAKE_CASE = image_processor(images=_UpperCAmelCase , return_tensors='pt') # forward pass torch.manual_seed(2) SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase) SCREAMING_SNAKE_CASE = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]]) elif "b16" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[14.28_89, -18.90_45, 11.72_81]]) elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[41.50_28, -22.86_81, 45.64_75]]) elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]]) else: SCREAMING_SNAKE_CASE = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]]) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , _UpperCAmelCase , atol=1e-4) print(F'''Saving model to {pytorch_dump_folder_path}''') model.save_pretrained(_UpperCAmelCase) print(F'''Saving image processor to {pytorch_dump_folder_path}''') image_processor.save_pretrained(_UpperCAmelCase) if __name__ == "__main__": a_ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar', type=str, help='URL of the checkpoint 
you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) a_ : List[Any] = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
73
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path a_ : str = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def lowerCamelCase__ (_UpperCAmelCase=True): if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) ) class _snake_case ( A__ ): _lowercase : Optional[Any] = None _lowercase : Optional[Any] = None def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]: with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , 
dataset=a) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=a , config_name=a , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'), config.DATASET_INFO_FILENAME, ]) SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a) self.assertTrue(os.path.exists(a)) @pytest.mark.integration def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple' SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam SCREAMING_SNAKE_CASE = None builder_instance.download_and_prepare() SCREAMING_SNAKE_CASE = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase) SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase) SCREAMING_SNAKE_CASE = builder_cls( cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset() assert ds assert isinstance(_UpperCAmelCase , _UpperCAmelCase) assert "train" in ds assert isinstance(ds['train'] , _UpperCAmelCase) assert next(iter(ds['train']))
73
1