code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
283
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __A , ): """simple docstring""" lowerCamelCase : str = parent lowerCamelCase : Union[str, Any] = 13 lowerCamelCase : Optional[Any] = 7 lowerCamelCase : List[str] = True lowerCamelCase : Optional[int] = True lowerCamelCase : Union[str, Any] = True lowerCamelCase : List[Any] = True lowerCamelCase : Tuple = True lowerCamelCase : Any = False lowerCamelCase : int = False lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = 99 lowerCamelCase : Tuple = 0 lowerCamelCase : Any = 32 lowerCamelCase : List[Any] = 2 lowerCamelCase : Tuple = 4 lowerCamelCase : List[str] = 0.1 lowerCamelCase : int = 0.1 lowerCamelCase : int = 512 lowerCamelCase : List[Any] = 16 lowerCamelCase : Any = 2 lowerCamelCase : Any = 0.02 lowerCamelCase : List[str] = 3 lowerCamelCase : Tuple = 4 lowerCamelCase : int = "last" lowerCamelCase : int = True lowerCamelCase : Dict = None lowerCamelCase : Tuple = 0 def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) lowerCamelCase : Tuple = None if 
self.use_input_lengths: lowerCamelCase : Optional[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase : str = None if self.use_token_type_ids: lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCamelCase : Dict = None lowerCamelCase : Dict = None lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A ) lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowerCamelCase : Dict = model(__A ) lowerCamelCase : Any = [input_ids, input_mask] lowerCamelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A ) lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowerCamelCase : int = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A ) lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A ) lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A ) lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Any = self.num_choices lowerCamelCase : Optional[Any] = 
TFFlaubertForMultipleChoice(config=__A ) lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : Optional[int] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Optional[Any] = config_and_inputs lowerCamelCase : List[Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' __A : str = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) __A : Dict = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __A : Any = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) __A : List[str] = False __A : 
List[str] = False def _snake_case ( self , __A , __A , __A , __A , __A ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = TFFlaubertModelTester(self ) lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A ) @slow def _snake_case ( self ): """simple docstring""" for model_name in 
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_tf @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) lowerCamelCase : str = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" lowerCamelCase : Dict = model(__A )[0] lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __A ) # compare the actual values for a slice. lowerCamelCase : Tuple = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
283
1
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging _snake_case = { '''cola''': 2, '''mnli''': 3, '''mrpc''': 2, '''sst-2''': 2, '''sts-b''': 1, '''qqp''': 2, '''qnli''': 2, '''rte''': 2, '''wnli''': 2, } logging.set_verbosity_info() def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): '''simple docstring''' lowerCamelCase : Dict = XLNetConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Optional[int] = finetuning_task.lower() if finetuning_task is not None else "" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) lowerCamelCase : List[Any] = finetuning_task lowerCamelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task] lowerCamelCase : List[str] = XLNetForSequenceClassification(SCREAMING_SNAKE_CASE_ ) elif "squad" in finetuning_task: lowerCamelCase : Any = finetuning_task lowerCamelCase : int = XLNetForQuestionAnswering(SCREAMING_SNAKE_CASE_ ) else: lowerCamelCase : int = XLNetLMHeadModel(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model lowerCamelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase : str = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print(f"""Save PyTorch model to {os.path.abspath(SCREAMING_SNAKE_CASE_ )}""" ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) print(f"""Save configuration file to {os.path.abspath(SCREAMING_SNAKE_CASE_ )}""" ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _snake_case = 
argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--xlnet_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained XLNet model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--finetuning_task''', default=None, type=str, help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''', ) _snake_case = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
283
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[] ): '''simple docstring''' lowerCamelCase : Optional[Any] = size[0] - overlap_pixels * 2 lowerCamelCase : int = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels lowerCamelCase : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 lowerCamelCase : List[Any] = np.pad(SCREAMING_SNAKE_CASE_ , mode="linear_ramp" , pad_width=SCREAMING_SNAKE_CASE_ , end_values=0 ) if "l" in remove_borders: lowerCamelCase : Optional[Any] = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: lowerCamelCase : List[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: lowerCamelCase : List[Any] = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: lowerCamelCase : Tuple = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return max(SCREAMING_SNAKE_CASE_ , min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Optional[Any] = list(SCREAMING_SNAKE_CASE_ ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap lowerCamelCase : Any = clamp_rect(SCREAMING_SNAKE_CASE_ , [0, 0] , [image_size[0], image_size[1]] ) return rect def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Dict = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(SCREAMING_SNAKE_CASE_ , (original_slice, 0) ) return result def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) lowerCamelCase : int = tile.crop(SCREAMING_SNAKE_CASE_ ) return tile def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : int = n % d return n - divisor class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ): """simple docstring""" super().__init__( vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , **__A ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase : Tuple = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) lowerCamelCase : Union[str, Any] = add_overlap_rect(__A , __A , image.size ) lowerCamelCase : List[str] = image.crop(__A ) lowerCamelCase : Optional[int] = ((crop_rect[0] + 
((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] lowerCamelCase : int = translated_slice_x - (original_image_slice / 2) lowerCamelCase : Optional[Any] = max(0 , __A ) lowerCamelCase : Tuple = squeeze_tile(__A , __A , __A , __A ) lowerCamelCase : Dict = to_input.size lowerCamelCase : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) lowerCamelCase : Dict = super(__A , self ).__call__(image=__A , **__A ).images[0] lowerCamelCase : Tuple = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) lowerCamelCase : Optional[Any] = unsqueeze_tile(__A , __A ) lowerCamelCase : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) lowerCamelCase : int = [] if x == 0: remove_borders.append("l" ) elif crop_rect[2] == image.size[0]: remove_borders.append("r" ) if y == 0: remove_borders.append("t" ) elif crop_rect[3] == image.size[1]: remove_borders.append("b" ) lowerCamelCase : int = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode="L" , ) final_image.paste( __A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A ) @torch.no_grad() def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ): """simple docstring""" lowerCamelCase : Dict = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) ) lowerCamelCase : Union[str, Any] = math.ceil(image.size[0] / tile_size ) lowerCamelCase : Dict = math.ceil(image.size[1] / tile_size ) lowerCamelCase : str = tcx * tcy lowerCamelCase : int = 0 for y in range(__A ): for x in range(__A ): self._process_tile( __A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , 
latents=__A , ) current_count += 1 if callback is not None: callback({"progress": current_count / total_tile_count, "image": final_image} ) return final_image def lowercase_( ): '''simple docstring''' lowerCamelCase : Dict = "stabilityai/stable-diffusion-x4-upscaler" lowerCamelCase : Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="fp16" , torch_dtype=torch.floataa ) lowerCamelCase : Optional[Any] = pipe.to("cuda" ) lowerCamelCase : List[str] = Image.open("../../docs/source/imgs/diffusers_library.jpg" ) def callback(SCREAMING_SNAKE_CASE_ ): print(f"""progress: {obj['progress']:.4f}""" ) obj["image"].save("diffusers_library_progress.jpg" ) lowerCamelCase : int = pipe(image=SCREAMING_SNAKE_CASE_ , prompt="Black font, white background, vector" , noise_level=40 , callback=SCREAMING_SNAKE_CASE_ ) final_image.save("diffusers_library.jpg" ) if __name__ == "__main__": main()
283
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = '''▁''' _snake_case = {'''vocab_file''': '''sentencepiece.bpe.model'''} _snake_case = { '''vocab_file''': { '''facebook/mbart-large-50-one-to-many-mmt''': ( '''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model''' ), } } _snake_case = { '''facebook/mbart-large-50-one-to-many-mmt''': 10_24, } # fmt: off _snake_case = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI'''] class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : List[str] = VOCAB_FILES_NAMES __A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Tuple = PRETRAINED_VOCAB_FILES_MAP __A : int = ["input_ids", "attention_mask"] __A : List[int] = [] __A : List[int] = [] def __init__( self , __A , __A=None , __A=None , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A = None , **__A , ): """simple docstring""" lowerCamelCase : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token lowerCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs lowerCamelCase : 
Union[str, Any] = kwargs.get("additional_special_tokens" , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__A , tgt_lang=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) lowerCamelCase : Tuple = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowerCamelCase : List[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCamelCase : str = 1 lowerCamelCase : Tuple = len(self.sp_model ) lowerCamelCase : Optional[int] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A ) } lowerCamelCase : str = {v: k for k, v in self.lang_code_to_id.items()} lowerCamelCase : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowerCamelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowerCamelCase : str = src_lang if src_lang is not None else "en_XX" lowerCamelCase : Optional[int] = self.lang_code_to_id[self._src_lang] lowerCamelCase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _snake_case ( self ): """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # 
Plus 1 for the mask token @property def _snake_case ( self ): """simple docstring""" return self._src_lang @src_lang.setter def _snake_case ( self , __A ): """simple docstring""" lowerCamelCase : Any = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): """simple docstring""" lowerCamelCase : List[Any] = self.__dict__.copy() lowerCamelCase : Dict = None return state def __setstate__( self , __A ): """simple docstring""" lowerCamelCase : List[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase : Any = {} lowerCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , __A ): """simple docstring""" return self.sp_model.encode(__A , out_type=__A ) def _snake_case ( self , __A ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCamelCase : Union[str, Any] = self.sp_model.PieceToId(__A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self , __A ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self , __A ): """simple docstring""" lowerCamelCase : List[Any] = [] lowerCamelCase : Dict = "" lowerCamelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__A ) + token lowerCamelCase : List[Any] = True lowerCamelCase : int = [] else: current_sub_tokens.append(__A ) 
lowerCamelCase : Optional[Any] = False out_string += self.sp_model.decode(__A ) return out_string.strip() def _snake_case ( self , __A , __A = None ): """simple docstring""" if not os.path.isdir(__A ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase : int = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , "wb" ) as fi: lowerCamelCase : Tuple = self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,) def _snake_case ( self , __A , __A = None , __A = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) lowerCamelCase : Union[str, Any] = [1] * len(self.prefix_tokens ) lowerCamelCase : List[str] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__A )) + suffix_ones return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones def _snake_case ( self , __A , __A = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _snake_case ( self , __A , __A , __A , __A , **__A ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) lowerCamelCase : int = src_lang lowerCamelCase : Any = self(__A , add_special_tokens=__A , return_tensors=__A , **__A ) lowerCamelCase : Union[str, Any] = self.convert_tokens_to_ids(__A ) lowerCamelCase : List[str] = tgt_lang_id return inputs def 
_snake_case ( self , __A , __A = "en_XX" , __A = None , __A = "ro_RO" , **__A , ): """simple docstring""" lowerCamelCase : Union[str, Any] = src_lang lowerCamelCase : Optional[Any] = tgt_lang return super().prepare_seqaseq_batch(__A , __A , **__A ) def _snake_case ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def _snake_case ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _snake_case ( self , __A ): """simple docstring""" lowerCamelCase : Tuple = self.lang_code_to_id[src_lang] lowerCamelCase : Dict = [self.cur_lang_code_id] lowerCamelCase : Optional[Any] = [self.eos_token_id] def _snake_case ( self , __A ): """simple docstring""" lowerCamelCase : Dict = self.lang_code_to_id[tgt_lang] lowerCamelCase : Any = [self.cur_lang_code_id] lowerCamelCase : List[str] = [self.eos_token_id]
283
# MobileNetV2 model configuration module: a `PretrainedConfig` subclass
# (model_type "mobilenet_v2") plus its ONNX export config (opset floor 1.11,
# pixel_values input, logits / last_hidden_state+pooler_output outputs,
# validation atol 1e-4).
# NOTE(review): an obfuscation pass collapsed every attribute assignment in
# `__init__` into `lowerCamelCase : <type> = ...`; the intended targets
# (self.num_channels, self.image_size, self.depth_multiplier, ...) can be read
# off the right-hand sides but must be restored before this is runnable.
# `UpperCamelCase` as a base class is likewise a damaged reference — TODO confirm
# against upstream (originally PretrainedConfig / OnnxConfig).
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Dict = "mobilenet_v2" def __init__( self , __A=3 , __A=224 , __A=1.0 , __A=8 , __A=8 , __A=6 , __A=32 , __A=True , __A=True , __A="relu6" , __A=True , __A=0.8 , __A=0.02 , __A=0.001 , __A=255 , **__A , ): """simple docstring""" super().__init__(**__A ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." 
) lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Union[str, Any] = depth_multiplier lowerCamelCase : Tuple = depth_divisible_by lowerCamelCase : Dict = min_depth lowerCamelCase : Dict = expand_ratio lowerCamelCase : Optional[Any] = output_stride lowerCamelCase : int = first_layer_is_expansion lowerCamelCase : Union[str, Any] = finegrained_output lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : Optional[Any] = tf_padding lowerCamelCase : Optional[Any] = classifier_dropout_prob lowerCamelCase : Dict = initializer_range lowerCamelCase : str = layer_norm_eps lowerCamelCase : Optional[Any] = semantic_loss_ignore_index class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Union[str, Any] = version.parse("1.11" ) @property def _snake_case ( self ): """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _snake_case ( self ): """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _snake_case ( self ): """simple docstring""" return 1e-4
283
1
# Lazy-import shim for the Autoformer model (standard transformers __init__ pattern):
# declares the import structure, then installs a `_LazyModule` in sys.modules unless
# TYPE_CHECKING, in which case it imports the names eagerly for type checkers.
# NOTE(review): obfuscation collapsed both module-level names to `_snake_case`, so the
# second assignment (the torch-only model list) clobbers the first (the import-structure
# dict) instead of extending it, and `_LazyModule(...)` references `_import_structure`,
# which is never bound here — TODO restore the original `_import_structure` bindings.
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _snake_case = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
"""Special-relativity Lorentz boost along the x axis.

Fix for the obfuscated original: all four functions were defined under the single
name ``lowercase_`` (each def clobbering the previous), their parameters were
named ``SCREAMING_SNAKE_CASE_`` while the bodies read ``velocity``/``event``, and
the module constants ``c``/``ct``/``x``/``y``/``z`` were bound to ``_snake_case``
— every call raised ``NameError``. The intended names are pinned by the internal
call sites (``beta``, ``gamma``, ``transformation_matrix``, ``transform``) and
the f-strings at the bottom; they are restored here with behavior unchanged.
"""
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299_792_458

# Symbolic four-vector components (ct, x, y, z)
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v/c for a speed in [1, c]; raise ValueError outside that range."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta(v)**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along x for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Boost `event` (a four-vector [ct, x, y, z]) by `velocity`.

    When `event` is None a symbolic four-vector is used. When an array is
    passed, its first component is scaled by c IN PLACE (x0 is ct = speed of
    light * time) — callers should pass a copy if they need the original.
    """
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time); mutates the caller's array
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

# Example of symbolic vector (runs at import time, as in the original script):
four_vector = transform(29_979_245)
print('Example of four vector: ')
print(f"ct' = {four_vector[0]}")
print(f"x' = {four_vector[1]}")
print(f"y' = {four_vector[2]}")
print(f"z' = {four_vector[3]}")

# Substitute symbols with numerical values
sub_dict = {ct: c, x: 1, y: 1, z: 1}
numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f"\n{numerical_vector}")
283
1
# `accelerate config default` subcommand: writes a minimal default ClusterConfig JSON
# (compute environment, mixed precision, device counts for CUDA/XPU/NPU or CPU
# fallback) and registers the argparse subparser.
# NOTE(review): obfuscation collapsed the assignment targets — both parameters of
# `write_basic_config` share the name `SCREAMING_SNAKE_CASE_`, and the per-device
# bindings (presumably num_processes / distributed_type / use_cpu keys of the config
# dict) all write to the throwaway `lowerCamelCase`, so the dict passed to
# `ClusterConfig(**...)` is never actually populated — TODO restore from upstream
# accelerate `commands/config/default.py` before use.
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter _snake_case = '''Create a default config file for Accelerate with only a few flags set.''' def lowercase_( SCREAMING_SNAKE_CASE_="no" , SCREAMING_SNAKE_CASE_ = default_json_config_file , SCREAMING_SNAKE_CASE_ = False ): '''simple docstring''' lowerCamelCase : List[str] = Path(SCREAMING_SNAKE_CASE_ ) path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) if path.exists(): print( f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False lowerCamelCase : Optional[Any] = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. 
Received {mixed_precision}""" ) lowerCamelCase : int = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if torch.cuda.is_available(): lowerCamelCase : Union[str, Any] = torch.cuda.device_count() lowerCamelCase : List[str] = num_gpus lowerCamelCase : int = False if num_gpus > 1: lowerCamelCase : Dict = "MULTI_GPU" else: lowerCamelCase : str = "NO" elif is_xpu_available() and use_xpu: lowerCamelCase : Optional[Any] = torch.xpu.device_count() lowerCamelCase : Any = num_xpus lowerCamelCase : Dict = False if num_xpus > 1: lowerCamelCase : int = "MULTI_XPU" else: lowerCamelCase : Dict = "NO" elif is_npu_available(): lowerCamelCase : str = torch.npu.device_count() lowerCamelCase : str = num_npus lowerCamelCase : Union[str, Any] = False if num_npus > 1: lowerCamelCase : Any = "MULTI_NPU" else: lowerCamelCase : Union[str, Any] = "NO" else: lowerCamelCase : Any = 0 lowerCamelCase : List[Any] = True lowerCamelCase : str = 1 lowerCamelCase : Optional[int] = "NO" lowerCamelCase : Optional[Any] = ClusterConfig(**SCREAMING_SNAKE_CASE_ ) config.to_json_file(SCREAMING_SNAKE_CASE_ ) return path def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Dict = parser.add_parser("default" , parents=SCREAMING_SNAKE_CASE_ , help=SCREAMING_SNAKE_CASE_ , formatter_class=SCREAMING_SNAKE_CASE_ ) parser.add_argument( "--config_file" , default=SCREAMING_SNAKE_CASE_ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , dest="save_location" , ) parser.add_argument( "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=SCREAMING_SNAKE_CASE_ , help="Whether or not to use mixed precision training. 
" "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , ) parser.set_defaults(func=SCREAMING_SNAKE_CASE_ ) return parser def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Tuple = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f"""accelerate configuration saved at {config_file}""" )
283
# Deprecation shim: DPTFeatureExtractor is a thin subclass that warns and delegates
# to DPTImageProcessor (scheduled for removal in Transformers v5).
# NOTE(review): obfuscation damaged this file — the base class `UpperCamelCase` is an
# unresolved name (originally DPTImageProcessor), and `warnings.warn(..., __A , )`
# passes the *args tuple where the warning category (FutureWarning) belonged —
# TODO restore from the upstream deprecation shim before use.
import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor _snake_case = logging.get_logger(__name__) class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' def __init__( self , *__A , **__A ): """simple docstring""" warnings.warn( "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DPTImageProcessor instead." , __A , ) super().__init__(*__A , **__A )
283
1
def lowercase_(bin_string: str) -> str:
    """Convert a binary string (e.g. "1111") to its octal representation ("17").

    Fix for the obfuscated original: every assignment wrote to the throwaway
    name `lowerCamelCase`, so `bin_string` was never zero-padded (the `while`
    loop spun forever) and `oct_string` / `oct_val` were never bound. The
    intended names are pinned by the reads (`bin_string`, `oct_string += ...`,
    `oct_val += ...`) and are restored here.

    Raises:
        ValueError: if `bin_string` is empty or contains non-"01" characters.
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the string splits evenly into 3-bit groups.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # Each 3-bit group maps to exactly one octal digit (0-7).
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
283
import argparse

# Path of the doc-site JS file that holds the version-switcher table.
_snake_case = "docs/source/_static/js/custom.js"


def lowercase_(version):
    """Write `version` into custom.js as the new stable release.

    Rewrites the `const stableVersion = ...` line and appends the new version
    to the `const versionMapping = {...}` dictionary, preserving LF newlines.

    Fix for the obfuscated original: the parameter was named
    `SCREAMING_SNAKE_CASE_` while the body read the undefined name `version`,
    and `open(SCREAMING_SNAKE_CASE_, ...)` opened the version *argument* as the
    file path instead of the module-level path constant; the `__main__` block
    also called the undefined name `update_custom_js`.
    """
    with open(_snake_case, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end (before the closing brace)
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(_snake_case, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    lowercase_(args.version)
283
1
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def lowercase_(graph: dict) -> bool:
    """Return True if `graph` (adjacency dict keyed by 0..n-1) is bipartite.

    Runs a DFS 2-coloring over every component, then verifies that no edge
    joins two same-colored vertices.

    Fix for the obfuscated original: `visited` and `color` were both assigned
    to the throwaway name `lowerCamelCase` (as were the writes inside `dfs`),
    so every read of `visited[...]` / `color[...]` raised NameError. The
    intended names are pinned by those reads and are restored here.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex v with c and propagate the opposite color to neighbors.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Cover disconnected components too.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # A same-colored edge means an odd cycle -> not bipartite.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph (the original script prints the check at import time;
# the call previously referenced the undefined names `check_bipartite_dfs`/`graph`).
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(lowercase_(graph))
283
# Auto-generated "dummy object" placeholders: each class stands in for a flax-backed
# class and raises a helpful error via `requires_backends(..., ["flax"])` on
# construction or on `from_config`/`from_pretrained`-style classmethod access.
# NOTE(review): obfuscation destroyed the distinguishing parts — all thirteen classes
# share the name `UpperCAmelCase_` (each redefinition clobbers the previous), the
# backend list is bound to `__A` instead of the `_backends` attribute that
# `requires_backends` reads, and `metaclass=UpperCamelCase` is an unresolved name
# (originally DummyObject, imported above) — TODO regenerate from the upstream
# dummy-objects template rather than hand-fixing.
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Any = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : str = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : List[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple 
docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Dict = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Dict = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Union[str, Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple 
docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Any = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : int = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] )
283
1
# Pipeline test suite for TextaTextGenerationPipeline (text2text-generation):
# checks single/batched generation shapes, num_return_sequences, return_tensors
# output, and PT/TF parity with the tiny random T5 checkpoint.
# NOTE(review): obfuscation collapsed the method names (every test is `_snake_case`,
# so later definitions clobber earlier ones in the class namespace) and the local
# result bindings — e.g. `lowerCamelCase : str = generator(...)` followed by
# `self.assertEqual(__A , ...)` / reads of the undefined `outputs` — so these tests
# cannot run as-is; TODO restore names from the upstream test module.
import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' __A : List[str] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __A : List[str] = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _snake_case ( self , __A , __A , __A ): """simple docstring""" lowerCamelCase : Optional[Any] = TextaTextGenerationPipeline(model=__A , tokenizer=__A ) return generator, ["Something to write", "Something else"] def _snake_case ( self , __A , __A ): """simple docstring""" lowerCamelCase : str = generator("Something there" ) self.assertEqual(__A , [{"generated_text": ANY(__A )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) ) lowerCamelCase : List[str] = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__A ) self.assertEqual( __A , [ [{"generated_text": ANY(__A )}, {"generated_text": ANY(__A )}], [{"generated_text": ANY(__A )}, {"generated_text": ANY(__A )}], ] , ) lowerCamelCase : Optional[Any] = generator( ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__A ) self.assertEqual( __A , [ [{"generated_text": ANY(__A )}, {"generated_text": ANY(__A )}], [{"generated_text": ANY(__A )}, {"generated_text": ANY(__A )}], ] , ) with self.assertRaises(__A ): generator(4 ) @require_torch def _snake_case ( self ): """simple docstring""" lowerCamelCase : Any = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" ) # do_sample=False necessary for reproducibility lowerCamelCase : int = 
generator("Something there" , do_sample=__A ) self.assertEqual(__A , [{"generated_text": ""}] ) lowerCamelCase : Optional[Any] = 3 lowerCamelCase : Union[str, Any] = generator( "Something there" , num_return_sequences=__A , num_beams=__A , ) lowerCamelCase : Any = [ {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": ""}, ] self.assertEqual(__A , __A ) lowerCamelCase : Any = generator("This is a test" , do_sample=__A , num_return_sequences=2 , return_tensors=__A ) self.assertEqual( __A , [ {"generated_token_ids": ANY(torch.Tensor )}, {"generated_token_ids": ANY(torch.Tensor )}, ] , ) lowerCamelCase : Optional[Any] = generator.model.config.eos_token_id lowerCamelCase : int = "<pad>" lowerCamelCase : List[Any] = generator( ["This is a test", "This is a second test"] , do_sample=__A , num_return_sequences=2 , batch_size=2 , return_tensors=__A , ) self.assertEqual( __A , [ [ {"generated_token_ids": ANY(torch.Tensor )}, {"generated_token_ids": ANY(torch.Tensor )}, ], [ {"generated_token_ids": ANY(torch.Tensor )}, {"generated_token_ids": ANY(torch.Tensor )}, ], ] , ) @require_tf def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" ) # do_sample=False necessary for reproducibility lowerCamelCase : str = generator("Something there" , do_sample=__A ) self.assertEqual(__A , [{"generated_text": ""}] )
283
# Agent tool wrapping openai/whisper-base: preprocesses audio with WhisperProcessor,
# generates with WhisperForConditionalGeneration, and batch-decodes to text.
# NOTE(review): obfuscation collapsed the class attributes — all seven (checkpoint,
# description, name, pre-processor class, model class, inputs, outputs) are assigned
# to `__A`, and the three pipeline methods (encode/forward/decode) all share the name
# `_snake_case`, so only the last survives on the class — TODO restore attribute and
# method names from the upstream tool definition before use.
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Dict = "openai/whisper-base" __A : str = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __A : Any = "transcriber" __A : Any = WhisperProcessor __A : int = WhisperForConditionalGeneration __A : Any = ["audio"] __A : List[str] = ["text"] def _snake_case ( self , __A ): """simple docstring""" return self.pre_processor(__A , return_tensors="pt" ).input_features def _snake_case ( self , __A ): """simple docstring""" return self.model.generate(inputs=__A ) def _snake_case ( self , __A ): """simple docstring""" return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0]
283
1
# Test suite for SpeechaTextTokenizer: a TokenizerTesterMixin-based unit-test class
# (vocab construction from the sentencepiece fixture, token<->id conversion, full
# tokenizer behavior, plus a slow integration test against a giant pinned
# `expected_encoding` fixture) and a multilingual class checking language-code ids
# and src/tgt prefix-token switching.
# NOTE(review): obfuscation collapsed local bindings (`spm_model`, `vocab`,
# `save_dir`, `tokenizer`, ... all assigned to `lowerCamelCase` yet read by name
# afterwards) and all test-method names (`_snake_case`), so this module raises
# NameError as-is — TODO restore from the upstream test module. The large literal
# dict below is fixture data pinned to a specific checkpoint revision; do NOT edit
# it by hand.
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_sentencepiece_available(): import sentencepiece as sp _snake_case = 5 _snake_case = 10 @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ): '''simple docstring''' __A : List[str] = SpeechaTextTokenizer __A : Any = False __A : List[str] = True def _snake_case ( self ): """simple docstring""" super().setUp() lowerCamelCase : Optional[int] = sp.SentencePieceProcessor() spm_model.Load(__A ) lowerCamelCase : List[Any] = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__A ) )] lowerCamelCase : Optional[Any] = dict(zip(__A , range(len(__A ) ) ) ) lowerCamelCase : str = Path(self.tmpdirname ) save_json(__A , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__A , save_dir / VOCAB_FILES_NAMES["spm_file"] ) lowerCamelCase : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[Any] = "<pad>" lowerCamelCase : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , 
"<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__A ) , 1001 ) def _snake_case ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[str] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase : int = tokenizer.tokenize("This is a test" ) self.assertListEqual(__A , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__A ) , [289, 50, 14, 174, 386] , ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __A , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase : List[str] = tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual( __A , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def _snake_case ( self ): """simple docstring""" lowerCamelCase : Any = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 
3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__A , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' __A : List[str] = "valhalla/s2t_mustc_multilinguial_medium" __A : Any = "C'est trop cool" __A : Union[str, Any] = "Esto es genial" @classmethod def _snake_case ( cls ): """simple docstring""" lowerCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def _snake_case ( self ): """simple docstring""" 
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 ) def _snake_case ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_0000 ) def _snake_case ( self ): """simple docstring""" self.assertIn(__A , self.tokenizer.all_special_ids ) lowerCamelCase : Any = [ES_CODE, 4, 1601, 47, 7647, 2] lowerCamelCase : List[Any] = self.tokenizer.decode(__A , skip_special_tokens=__A ) lowerCamelCase : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__A ) self.assertEqual(__A , __A ) self.assertNotIn(self.tokenizer.eos_token , __A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = "fr" lowerCamelCase : Tuple = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , __A ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[int] = "fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) lowerCamelCase : Tuple = "es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
283
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map one parameter name from the original YOSO checkpoint to its
    `transformers` equivalent.

    The order of the substring checks matters: more specific patterns
    ("norm1"/"norm2" before "norm", "mha.attn" before "mha", "ff1"/"ff2"
    before "ff") must run first, because each replacement rebinds the key.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        # Everything outside the MLM head lives under the "yoso." prefix.
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rewrite all keys of ``orig_state_dict`` in place, dropping pooler and
    sentence-classification weights, and add the derived bias/position-id
    tensors expected by ``YosoForMaskedLM``."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # Position ids are offset by 2 (padding_idx + 1), as in the original model.
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load an original YOSO checkpoint, convert its state dict and save it
    in the `transformers` format at ``pytorch_dump_path``."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    # load_state_dict returns the missing/unexpected keys; print them for inspection.
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
283
1
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCAmelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , __A , __A , __A = None , __A = None ): """simple docstring""" super().__init__() lowerCamelCase : Any = pad_token_id lowerCamelCase : Optional[Any] = max_length lowerCamelCase : str = vocab lowerCamelCase : int = merges lowerCamelCase : str = BytePairTokenizer(__A , __A , sequence_length=__A ) @classmethod def _snake_case ( cls , __A , *__A , **__A ): """simple docstring""" lowerCamelCase : Dict = [" ".join(__A ) for m in tokenizer.bpe_ranks.keys()] lowerCamelCase : Optional[Any] = tokenizer.get_vocab() return cls(__A , __A , *__A , **__A ) @classmethod def _snake_case ( cls , __A , *__A , **__A ): """simple docstring""" lowerCamelCase : int = GPTaTokenizer.from_pretrained(__A , *__A , **__A ) return cls.from_tokenizer(__A , *__A , **__A ) @classmethod def _snake_case ( cls , __A ): """simple docstring""" return cls(**__A ) def _snake_case ( self ): """simple docstring""" return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _snake_case ( self , __A , __A = None ): """simple docstring""" lowerCamelCase : int = self.tf_tokenizer(__A ) lowerCamelCase : Any = tf.ones_like(__A ) if self.pad_token_id is not None: # pad the tokens up to max length lowerCamelCase : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: lowerCamelCase , lowerCamelCase : Optional[Any] = pad_model_inputs( __A , max_seq_length=__A , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
283
import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Optional[int] = "M-CLIP" def __init__( self , __A=1024 , __A=768 , **__A ): """simple docstring""" lowerCamelCase : str = transformerDimSize lowerCamelCase : Any = imageDimSize super().__init__(**__A ) class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Tuple = MCLIPConfig def __init__( self , __A , *__A , **__A ): """simple docstring""" super().__init__(__A , *__A , **__A ) lowerCamelCase : Tuple = XLMRobertaModel(__A ) lowerCamelCase : Optional[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def _snake_case ( self , __A , __A ): """simple docstring""" lowerCamelCase : Any = self.transformer(input_ids=__A , attention_mask=__A )[0] lowerCamelCase : int = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(__A ), embs
283
1
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' def _snake_case ( self ): """simple docstring""" lowerCamelCase : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__A , "embed_dim" ) ) self.parent.assertTrue(hasattr(__A , "num_heads" ) ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __A , __A=13 , __A=64 , __A=3 , __A=[16, 48, 96] , __A=[1, 3, 6] , __A=[1, 2, 10] , __A=[7, 3, 3] , __A=[4, 2, 2] , __A=[2, 1, 1] , __A=[2, 2, 2] , __A=[False, False, True] , __A=[0.0, 0.0, 0.0] , __A=0.02 , __A=1e-12 , __A=True , __A=True , __A=2 , ): """simple docstring""" lowerCamelCase : Union[str, Any] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : Union[str, Any] = image_size lowerCamelCase : List[str] = patch_sizes lowerCamelCase : Tuple = patch_stride lowerCamelCase : Dict = patch_padding lowerCamelCase : Optional[int] = is_training lowerCamelCase : Optional[Any] = use_labels lowerCamelCase : Optional[Any] = num_labels lowerCamelCase : List[str] = num_channels lowerCamelCase : List[Any] = embed_dim lowerCamelCase : int = num_heads lowerCamelCase : List[Any] = stride_kv lowerCamelCase : Dict = depth lowerCamelCase : Any 
= cls_token lowerCamelCase : List[Any] = attention_drop_rate lowerCamelCase : Tuple = initializer_range lowerCamelCase : Optional[int] = layer_norm_eps def _snake_case ( self ): """simple docstring""" lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Optional[int] = None if self.use_labels: # create a random int32 tensor of given shape lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : List[Any] = self.get_config() return config, pixel_values, labels def _snake_case ( self ): """simple docstring""" return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def _snake_case ( self , __A , __A , __A ): """simple docstring""" lowerCamelCase : Any = TFCvtModel(config=__A ) lowerCamelCase : Tuple = model(__A , training=__A ) lowerCamelCase : Optional[Any] = (self.image_size, self.image_size) lowerCamelCase , lowerCamelCase : Any = image_size[0], image_size[1] for i in range(len(self.depth ) ): lowerCamelCase : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) lowerCamelCase : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def _snake_case ( self , __A , __A , __A ): """simple docstring""" lowerCamelCase : List[Any] = self.num_labels lowerCamelCase : Optional[int] = TFCvtForImageClassification(__A ) lowerCamelCase : Dict = model(__A , labels=__A , 
training=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : int = config_and_inputs lowerCamelCase : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' __A : Dict = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () __A : Tuple = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) __A : List[str] = False __A : Tuple = False __A : Tuple = False __A : Union[str, Any] = False __A : Any = False def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[str] = TFCvtModelTester(self ) lowerCamelCase : List[Any] = TFCvtConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 ) def _snake_case ( self ): """simple docstring""" self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions" ) def _snake_case ( self ): """simple docstring""" pass @unittest.skip(reason="Cvt does not use inputs_embeds" ) def _snake_case ( self ): """simple docstring""" pass @unittest.skip(reason="Cvt does not support input and output embeddings" ) def _snake_case ( self ): """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on 
CPU." , ) def _snake_case ( self ): """simple docstring""" super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _snake_case ( self ): """simple docstring""" super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = tf.keras.mixed_precision.Policy("mixed_float16" ) tf.keras.mixed_precision.set_global_policy(__A ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32" ) def _snake_case ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Any = model_class(__A ) lowerCamelCase : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[int] = [*signature.parameters.keys()] lowerCamelCase : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __A ) def _snake_case ( self ): """simple docstring""" def check_hidden_states_output(__A , __A , __A ): lowerCamelCase : List[Any] = model_class(__A ) lowerCamelCase : Tuple = model(**self._prepare_for_class(__A , __A ) ) lowerCamelCase : List[str] = outputs.hidden_states lowerCamelCase : Dict = len(self.model_tester.depth ) self.assertEqual(len(__A ) , __A ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[Any] = True 
check_hidden_states_output(__A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Tuple = True check_hidden_states_output(__A , __A , __A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @slow def _snake_case ( self ): """simple docstring""" for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : str = TFCvtModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def lowercase_( ): '''simple docstring''' lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self ): """simple docstring""" return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[Any] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : int = image_processor(images=__A , return_tensors="tf" ) # forward pass lowerCamelCase : Optional[int] = model(**__A ) # verify the logits lowerCamelCase : Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __A ) lowerCamelCase : Any = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __A , atol=1e-4 ) )
283
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    """Tests that OwlViTProcessor correctly composes its tokenizer and
    image processor and round-trips through save/load."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
283
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _snake_case = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BioGPT's Moses/BPE tokenizer."""

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenize with the tiny fixture vocab and check tokens and ids."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # BioGPT prepends </s> (id 2) to every sequence and between pairs.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
283
1
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from every byte value (0-255) to a printable unicode character.

    Printable latin-1 bytes map to themselves; the remaining (control/whitespace)
    bytes are shifted up past 0xFF so every byte has a distinct, visible symbol.
    This lets byte-level BPE operate on arbitrary UTF-8 without unk tokens.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    """
    Byte-level BPE tokenizer for Longformer (same algorithm as GPT-2/RoBERTa).

    Treats spaces as part of the following token, so a word is encoded
    differently at the start of a sentence than after a space.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # Skip the "#version" header line and the trailing empty line.
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Number of base-vocabulary entries (added tokens excluded)."""
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the ranked BPE merges to `token`; returns a space-joined symbol string."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Lowest-rank (most frequent) merge wins each round.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab; unknown tokens map to unk."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # NOTE: obfuscated original had `key=lambda __A: kv[1]` (undefined `kv`) — fixed.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Single sequence: <s> X </s>. Pair: <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Longformer (like RoBERTa) does not use token type ids — always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so leading words BPE-merge like mid-sentence words."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
283
def least_divisible_repunit(divisor: int) -> int:
    """
    Return A(divisor): the length of the shortest repunit (111...1) divisible
    by `divisor`, or 0 when no repunit is divisible by it (i.e. when the
    divisor shares a factor with 10).

    Project Euler 129: repunit lengths are tracked mod `divisor`, so the loop
    terminates as soon as the running remainder hits 0.
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        # gcd(divisor, 10) > 1 -> no repunit is ever divisible.
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # Appending a '1' digit: R(k+1) = 10 * R(k) + 1, reduced mod divisor.
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """
    Return the least odd value n, coprime to 10, for which A(n) > limit.

    Since A(n) <= n, the search can start at `limit - 1` (made odd) and step
    by 2, skipping even candidates outright.
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
283
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy-import structure: only populated when sentencepiece is installed,
# since BartphoTokenizer depends on it.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
283
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_key, new_key) pairs mapping original DiT checkpoint names to HF BEiT names."""
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """
    Split each layer's fused qkv projection into separate query/key/value
    weights in `state_dict`, and rename gamma_1/gamma_2 to lambda_1/lambda_2.

    NOTE(review): original DiT checkpoints have no key bias (only q_bias and
    v_bias), so only query and value biases are written.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats test image used to sanity-check outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Convert an original DiT checkpoint (masked-image-modeling or RVL-CDIP
    classification) into a HuggingFace BEiT model, verify the logits shape,
    and save model + image processor (optionally pushing to the hub).
    """
    # Checkpoints fine-tuned on RVL-CDIP have a classification head; all
    # others keep the masked-image-modeling LM head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
283
1
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3

"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Exact-match rate between predictions and references, with optional normalization."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": rate} as a percentage in [0, 100]."""
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
283
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """
    Wraps a BridgeTower image processor and a Roberta tokenizer into a single
    processor that produces text features plus pixel_values/pixel_mask.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and preprocess `images`, merging both into one BatchEncoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to RobertaTokenizerFast.batch_decode; see its docstring."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to RobertaTokenizerFast.decode; see its docstring."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor inputs, deduplicated in order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
283
1
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """1 if the two strings are equal after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Percentage of predictions exactly matching at least one reference."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """
    Compute the SARI keep/delete/add scores for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references.

    Returns:
        (keepscore, delscore_precision, addscore) for this n-gram order.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    # Scale source counts by the number of references so they are comparable
    # to the pooled reference counts.
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """
    SARI score for one (source, candidate, references) triple, averaging the
    keep/delete/add F-scores over 1- to 4-grams.
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """
    Tokenize/normalize a sentence so it can be split on spaces.

    Args:
        sentence: input string.
        lowercase: lower-case the sentence first.
        tokenizer: one of "13a", "intl" (sacrebleu tokenizers), "moses",
            "penn" (sacremoses), anything else leaves the sentence untouched.
        return_str: return a string; if False, return the token list.
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu 2.x moved the tokenizer registry.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100), averaged over sentences."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; every prediction needs the same ref count."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects references transposed: one list per reference index.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """WikiSplit metric: SARI + sacrebleu + exact match."""

    def _info(self):
        # Method must be named `_info`; the datasets.Metric framework calls it.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        # Method must be named `_compute`; called by datasets.Metric.compute().
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
283
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place with cocktail shaker sort (bidirectional bubble
    sort) and return it.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, -7])
    [-7, -4, 0, 1, 5]
    >>> cocktail_shaker_sort([])
    []
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # Backward pass: sink the smallest remaining element to the left.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # Forward pass: bubble the largest remaining element to the right.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        # No swap in either direction means the list is already sorted.
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
283
1
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the CodeGen slow and fast tokenizers."""

    # Hooks read by TokenizerTesterMixin.
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        # Write a tiny vocab/merges pair into the mixin-provided temp dir.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Pretokenized inputs don't mix well with the byte-level BPE used here;
        # intentionally skipped (matches the original empty override).
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Without a pad token, padding requests must raise.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        # NOTE(review): internal whitespace of these literals was mangled in
        # the collapsed source; reproduced as displayed — confirm upstream.
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token; intentionally skipped.
        pass
283
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class UpperCAmelCase_(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (tiny-model) tests for the StableUnCLIP text-to-image pipeline."""

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build a dict of tiny, deterministic pipeline components."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        # NOTE(review): clip_sample=True recovered from upstream; the flag was
        # unreadable in the mangled source — confirm.
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # class embeddings are the image embedding + noise level concatenated
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the fast tests."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Exact-value comparison only makes sense on CPU.
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
283
1
# AltCLIP model configuration (text, vision, and combined configs).
#
# NOTE(review): identifiers are machine-mangled.  Both module globals are
# bound to `_snake_case` (the second assignment clobbers the first), and the
# `lowerCamelCase : T = ...` statements inside methods were originally
# `self.<attr> = ...` / named-local assignments — later code reads the
# unmangled names (`config_dict`, `text_config`, `_text_config_dict`,
# `output`, ...).  As written the module raises NameError; restore the
# original names before use.
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

_snake_case = logging.get_logger(__name__)

_snake_case = {
    '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class UpperCAmelCase_ ( UpperCamelCase ):
    """Configuration for the AltCLIP text encoder (XLM-R-style backbone)."""

    __A : Optional[Any] = "altclip_text_model"

    def __init__( self , __A=25_0002 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=514 , __A=1 , __A=0.02 , __A=0.02 , __A=1e-05 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=768 , **__A , ):
        """Store the text-model hyperparameters; special-token ids go to the base class."""
        super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
        lowerCamelCase : Dict = vocab_size
        lowerCamelCase : List[str] = hidden_size
        lowerCamelCase : Union[str, Any] = num_hidden_layers
        lowerCamelCase : Any = num_attention_heads
        lowerCamelCase : Tuple = hidden_act
        lowerCamelCase : Optional[int] = intermediate_size
        lowerCamelCase : Dict = hidden_dropout_prob
        lowerCamelCase : Tuple = attention_probs_dropout_prob
        lowerCamelCase : Tuple = max_position_embeddings
        lowerCamelCase : List[Any] = type_vocab_size
        lowerCamelCase : List[str] = initializer_range
        lowerCamelCase : Any = initializer_factor
        lowerCamelCase : Dict = layer_norm_eps
        lowerCamelCase : Union[str, Any] = position_embedding_type
        lowerCamelCase : int = use_cache
        lowerCamelCase : Optional[int] = project_dim


class UpperCAmelCase_ ( UpperCamelCase ):
    """Configuration for the AltCLIP vision encoder (CLIP-ViT-style backbone)."""

    __A : Any = "altclip_vision_model"

    def __init__( self , __A=768 , __A=3072 , __A=512 , __A=12 , __A=12 , __A=3 , __A=224 , __A=32 , __A="quick_gelu" , __A=1e-5 , __A=0.0 , __A=0.02 , __A=1.0 , **__A , ):
        """Store the vision-model hyperparameters."""
        super().__init__(**__A )
        lowerCamelCase : Optional[Any] = hidden_size
        lowerCamelCase : List[Any] = intermediate_size
        lowerCamelCase : int = projection_dim
        lowerCamelCase : Dict = num_hidden_layers
        lowerCamelCase : int = num_attention_heads
        lowerCamelCase : Optional[int] = num_channels
        lowerCamelCase : int = patch_size
        lowerCamelCase : List[Any] = image_size
        lowerCamelCase : Union[str, Any] = initializer_range
        lowerCamelCase : Union[str, Any] = initializer_factor
        lowerCamelCase : Optional[Any] = attention_dropout
        lowerCamelCase : Optional[int] = layer_norm_eps
        lowerCamelCase : Dict = hidden_act

    @classmethod
    def _snake_case ( cls , __A , **__A ):
        """Load this config from a pretrained checkpoint, unwrapping a nested
        `vision_config` when the checkpoint is a full `altclip` config."""
        cls._set_token_in_kwargs(__A )
        lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(__A , **__A )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type" ) == "altclip":
            lowerCamelCase : Tuple = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(__A , **__A )


class UpperCAmelCase_ ( UpperCamelCase ):
    """Combined AltCLIP configuration holding a text and a vision sub-config."""

    __A : str = "altclip"
    __A : Union[str, Any] = True

    def __init__( self , __A=None , __A=None , __A=768 , __A=2.6592 , **__A ):
        """Build the composite config, reconciling the legacy
        `text_config_dict`/`vision_config_dict` kwargs with the newer
        `text_config`/`vision_config` arguments (dict values win, with a
        warning on conflicts)."""
        lowerCamelCase : Any = kwargs.pop("text_config_dict" , __A )
        lowerCamelCase : str = kwargs.pop("vision_config_dict" , __A )
        super().__init__(**__A )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                lowerCamelCase : int = {}
            # This is the complete result when using `text_config_dict`.
            lowerCamelCase : List[str] = AltCLIPTextConfig(**__A ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        lowerCamelCase : int = (
                            F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
                            F"""The value `text_config_dict[\"{key}\"]` will be used instead.""" )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase : int = (
                            F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
                            F"""value `text_config[\"{key}\"]` will be overriden.""" )
                    logger.warning(__A )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                lowerCamelCase : Union[str, Any] = {}
            # This is the complete result when using `vision_config_dict`.
            lowerCamelCase : int = AltCLIPVisionConfig(**__A ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                lowerCamelCase : str = {
                    str(__A ): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        lowerCamelCase : Union[str, Any] = (
                            F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
                            F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead.""" )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase : List[str] = (
                            F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
                            F"""The value `vision_config[\"{key}\"]` will be overriden.""" )
                    logger.warning(__A )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            lowerCamelCase : List[str] = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
        if vision_config is None:
            lowerCamelCase : List[str] = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
        lowerCamelCase : List[Any] = AltCLIPTextConfig(**__A )
        lowerCamelCase : Tuple = AltCLIPVisionConfig(**__A )
        lowerCamelCase : Tuple = projection_dim
        lowerCamelCase : Tuple = logit_scale_init_value
        lowerCamelCase : Tuple = 1.0

    @classmethod
    def _snake_case ( cls , __A , __A , **__A ):
        """Alternate constructor from already-built sub-config objects."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )

    def _snake_case ( self ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
        lowerCamelCase : List[Any] = self.text_config.to_dict()
        lowerCamelCase : int = self.vision_config.to_dict()
        lowerCamelCase : Dict = self.__class__.model_type
        return output
283
# Lazy-import scaffolding for the SqueezeBERT model package.
#
# NOTE(review): module globals are mangled — the structure dict, the two
# extension lists, and the final `_LazyModule` are all bound to `_snake_case`
# (each assignment clobbering the last), while `_LazyModule` is called with
# the original name `_import_structure`.  As written this raises NameError;
# the dict and list assignments were originally `_import_structure = ...` /
# `_import_structure["..."] = [...]`.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_snake_case = {
    '''configuration_squeezebert''': [
        '''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''SqueezeBertConfig''',
        '''SqueezeBertOnnxConfig''',
    ],
    '''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}

# Fast-tokenizer names are only registered when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = ['''SqueezeBertTokenizerFast''']

# Torch-backed model names are only registered when `torch` is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        '''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''SqueezeBertForMaskedLM''',
        '''SqueezeBertForMultipleChoice''',
        '''SqueezeBertForQuestionAnswering''',
        '''SqueezeBertForSequenceClassification''',
        '''SqueezeBertForTokenClassification''',
        '''SqueezeBertModel''',
        '''SqueezeBertModule''',
        '''SqueezeBertPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is lazy.
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy that imports on attribute access.
    _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
1
def actual_power(a: float, b: int) -> float:
    """Return ``a ** b`` for a non-negative integer exponent ``b``.

    Uses divide-and-conquer exponentiation: the half-power is computed once
    and squared, giving O(log b) multiplications.  (The original recomputed
    the half-power twice, degrading to O(b), and both functions were given
    the same mangled name with duplicate parameter names — a SyntaxError.)
    """
    if b == 0:
        # Convention: anything to the zeroth power is 1 (including 0 ** 0 here).
        return 1
    half = actual_power(a, b // 2)
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: float, b: int) -> float:
    """Return ``a ** b`` for any integer exponent ``b``.

    Negative exponents are handled as ``1 / a ** -b``, so the helper only
    ever sees a non-negative exponent.

    Raises:
        ZeroDivisionError: if ``a == 0`` and ``b < 0``.
    """
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
283
# Decision Transformer model configuration.
#
# NOTE(review): identifiers are mangled — both module globals bind to
# `_snake_case` (second assignment clobbers the first), and the
# `lowerCamelCase : T = ...` lines in `__init__` were originally
# `self.<attr> = ...` assignments.  Restore before use.
from ...configuration_utils import PretrainedConfig
from ...utils import logging

_snake_case = logging.get_logger(__name__)

_snake_case = {
    '''edbeeching/decision-transformer-gym-hopper-medium''': (
        '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class UpperCAmelCase_ ( UpperCamelCase ):
    """Configuration class storing Decision Transformer hyperparameters
    (GPT-2-style backbone plus state/action dimensions)."""

    __A : str = "decision_transformer"
    __A : Union[str, Any] = ["past_key_values"]
    # Maps common config attribute names onto the GPT-2-style short names.
    __A : Optional[int] = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , __A=17 , __A=4 , __A=128 , __A=4096 , __A=True , __A=1 , __A=1024 , __A=3 , __A=1 , __A=None , __A="relu" , __A=0.1 , __A=0.1 , __A=0.1 , __A=1e-5 , __A=0.02 , __A=True , __A=True , __A=5_0256 , __A=5_0256 , __A=False , __A=False , **__A , ):
        """Store all hyperparameters; token ids are forwarded to the base class."""
        lowerCamelCase : List[str] = state_dim
        lowerCamelCase : Tuple = act_dim
        lowerCamelCase : List[str] = hidden_size
        lowerCamelCase : Optional[Any] = max_ep_len
        lowerCamelCase : Union[str, Any] = action_tanh
        lowerCamelCase : int = vocab_size
        lowerCamelCase : List[Any] = n_positions
        lowerCamelCase : Dict = n_layer
        lowerCamelCase : int = n_head
        lowerCamelCase : List[Any] = n_inner
        lowerCamelCase : Any = activation_function
        lowerCamelCase : Optional[int] = resid_pdrop
        lowerCamelCase : str = embd_pdrop
        lowerCamelCase : Tuple = attn_pdrop
        lowerCamelCase : List[Any] = layer_norm_epsilon
        lowerCamelCase : Dict = initializer_range
        lowerCamelCase : Optional[int] = scale_attn_weights
        lowerCamelCase : List[Any] = use_cache
        lowerCamelCase : Tuple = scale_attn_by_inverse_layer_idx
        lowerCamelCase : Optional[int] = reorder_and_upcast_attn
        lowerCamelCase : Dict = bos_token_id
        lowerCamelCase : Any = eos_token_id
        super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
283
1
# Lazy-import scaffolding for the SqueezeBERT model package.
# NOTE(review): this chunk is a byte-for-byte duplicate of an earlier chunk
# in this dump.  Module globals are mangled: the structure dict, both
# extension lists, and the `_LazyModule` result all bind to `_snake_case`,
# while `_LazyModule` is called with the original name `_import_structure`
# — a NameError as written.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_snake_case = {
    '''configuration_squeezebert''': [
        '''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''SqueezeBertConfig''',
        '''SqueezeBertOnnxConfig''',
    ],
    '''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = ['''SqueezeBertTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = [
        '''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''SqueezeBertForMaskedLM''',
        '''SqueezeBertForMultipleChoice''',
        '''SqueezeBertForQuestionAnswering''',
        '''SqueezeBertForSequenceClassification''',
        '''SqueezeBertForTokenClassification''',
        '''SqueezeBertModel''',
        '''SqueezeBertModule''',
        '''SqueezeBertPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static-analysis-only imports; runtime access goes through the lazy proxy.
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci numbers not exceeding ``n``
    (Project Euler problem 2).

    The original materialized the whole Fibonacci list with a redundant
    ``break``-inside-``while`` arrangement and — due to identifier mangling —
    never actually bound ``fib``/``i``/``total``.  This version walks the
    sequence with two rolling variables, accumulating even terms directly;
    the result is unchanged (0 is counted as even, contributing nothing).

    >>> solution(10)
    10
    """
    total = 0
    a, b = 0, 1
    while a <= n:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total


if __name__ == "__main__":
    print(f"""{solution() = }""")
283
1
# Video image processor (resize / center-crop / rescale-with-offset /
# normalize pipeline over batches of video frames).
#
# NOTE(review): identifiers are machine-mangled throughout — `__A`
# parameters, `lowerCamelCase : T = ...` bindings later read under their
# original names (`videos`, `size`, `image`, ...), `SCREAMING_SNAKE_CASE_`
# in the free function, and `np.floataa` (presumably `np.float32`).  As
# written the module raises NameError; restore the names before running.
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging

if is_vision_available():
    import PIL

_snake_case = logging.get_logger(__name__)


def lowercase_( SCREAMING_SNAKE_CASE_ ):
    """Coerce the input into a batch-of-videos nesting: list[list[image]].

    Accepts an already-batched input, a single video (list of frames), or a
    single image, wrapping as needed; anything else raises ValueError.
    """
    if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""" )


class UpperCAmelCase_ ( UpperCamelCase ):
    """Image processor applying resize, center-crop, offset rescale, and
    normalization to every frame of every video in a batch."""

    __A : Any = ["pixel_values"]

    def __init__( self , __A = True , __A = None , __A = PILImageResampling.BILINEAR , __A = True , __A = None , __A = True , __A = 1 / 255 , __A = True , __A = True , __A = None , __A = None , **__A , ):
        """Store defaults: shortest-edge-256 resize, 224x224 crop, 1/255
        rescale with offset, ImageNet-standard mean/std normalization."""
        super().__init__(**__A )
        lowerCamelCase : Tuple = size if size is not None else {"shortest_edge": 256}
        lowerCamelCase : List[Any] = get_size_dict(__A , default_to_square=__A )
        lowerCamelCase : List[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
        lowerCamelCase : Dict = get_size_dict(__A , param_name="crop_size" )
        lowerCamelCase : str = do_resize
        lowerCamelCase : Any = size
        lowerCamelCase : List[str] = do_center_crop
        lowerCamelCase : Tuple = crop_size
        lowerCamelCase : Dict = resample
        lowerCamelCase : Dict = do_rescale
        lowerCamelCase : List[str] = rescale_factor
        lowerCamelCase : Dict = offset
        lowerCamelCase : Dict = do_normalize
        lowerCamelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCamelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def _snake_case ( self , __A , __A , __A = PILImageResampling.BILINEAR , __A = None , **__A , ):
        """Resize one frame; `size` may give a shortest edge or explicit H/W."""
        lowerCamelCase : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
        if "shortest_edge" in size:
            lowerCamelCase : Optional[int] = get_resize_output_image_size(__A , size["shortest_edge"] , default_to_square=__A )
        elif "height" in size and "width" in size:
            lowerCamelCase : Tuple = (size["height"], size["width"])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        return resize(__A , size=__A , resample=__A , data_format=__A , **__A )

    def _snake_case ( self , __A , __A , __A = None , **__A , ):
        """Center-crop one frame to the given height/width."""
        lowerCamelCase : str = get_size_dict(__A )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(__A , size=(size["height"], size["width"]) , data_format=__A , **__A )

    def _snake_case ( self , __A , __A , __A = True , __A = None , **__A , ):
        """Rescale a frame; with `offset`, values are first shifted by
        -(scale/2) so the output spans a symmetric range."""
        lowerCamelCase : Tuple = image.astype(np.floataa )
        if offset:
            lowerCamelCase : Optional[int] = image - (scale / 2)
        return rescale(__A , scale=__A , data_format=__A , **__A )

    def _snake_case ( self , __A , __A , __A , __A = None , **__A , ):
        """Normalize a frame with the given per-channel mean and std."""
        return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )

    def _snake_case ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , ):
        """Apply the full single-frame pipeline (validate, resize, crop,
        rescale, normalize, channel-format)."""
        # NOTE(review): operator-precedence bug — this parses as
        # `(do_resize and size is None) or resample is None`, so it raises
        # whenever `resample` is None even with `do_resize=False`.  Intended:
        # `if do_resize and (size is None or resample is None):`.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        lowerCamelCase : Optional[Any] = to_numpy_array(__A )
        if do_resize:
            lowerCamelCase : str = self.resize(image=__A , size=__A , resample=__A )
        if do_center_crop:
            lowerCamelCase : str = self.center_crop(__A , size=__A )
        if do_rescale:
            lowerCamelCase : Optional[int] = self.rescale(image=__A , scale=__A , offset=__A )
        if do_normalize:
            lowerCamelCase : Dict = self.normalize(image=__A , mean=__A , std=__A )
        lowerCamelCase : List[str] = to_channel_dimension_format(__A , __A )
        return image

    def _snake_case ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ):
        """Public entry point: resolve per-call overrides against the stored
        defaults, batch the videos, process every frame, and return a
        BatchFeature of pixel values."""
        lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
        lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
        lowerCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowerCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCamelCase : List[Any] = offset if offset is not None else self.offset
        lowerCamelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
        lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
        lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
        lowerCamelCase : List[str] = size if size is not None else self.size
        lowerCamelCase : Tuple = get_size_dict(__A , default_to_square=__A )
        lowerCamelCase : Tuple = crop_size if crop_size is not None else self.crop_size
        lowerCamelCase : Tuple = get_size_dict(__A , param_name="crop_size" )
        if not valid_images(__A ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        lowerCamelCase : Union[str, Any] = make_batched(__A )
        lowerCamelCase : Optional[int] = [
            [
                self._preprocess_image(
                    image=__A , do_resize=__A , size=__A , resample=__A , do_center_crop=__A , crop_size=__A , do_rescale=__A , rescale_factor=__A , offset=__A , do_normalize=__A , image_mean=__A , image_std=__A , data_format=__A , )
                for img in video
            ]
            for video in videos
        ]
        lowerCamelCase : int = {"pixel_values": videos}
        return BatchFeature(data=__A , tensor_type=__A )
283
# Lazy-import scaffolding for the VisionEncoderDecoder package
# (PyTorch, TensorFlow, and Flax backends guarded independently).
#
# NOTE(review): module globals are mangled — the structure dict, the three
# backend lists, and the `_LazyModule` result all bind to `_snake_case`,
# while `_LazyModule` is called with the original name `_import_structure`;
# a NameError as written.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_snake_case = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = ['''VisionEncoderDecoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = ['''TFVisionEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case = ['''FlaxVisionEncoderDecoderModel''']

if TYPE_CHECKING:
    # Static-analysis-only imports; at runtime the module is a lazy proxy.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
1
# Processor combining a Pix2Struct image processor with a T5 tokenizer.
#
# NOTE(review): identifiers are mangled — `__A` parameters and
# `lowerCamelCase : T = ...` bindings later read under their original names
# (`text_encoding`, `encoding_image_processor`, `tokenizer_input_names`,
# ...).  As written, `__call__` raises NameError; restore names before use.
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class UpperCAmelCase_ ( UpperCamelCase ):
    """Wraps a Pix2StructImageProcessor and a T5 tokenizer into one
    processor; rendered-text models skip special tokens entirely."""

    __A : int = ["image_processor", "tokenizer"]
    __A : Optional[int] = "Pix2StructImageProcessor"
    __A : Union[str, Any] = ("T5Tokenizer", "T5TokenizerFast")

    def __init__( self , __A , __A ):
        """Store both components; the False flag is presumably
        `tokenizer.return_token_type_ids = False` — confirm against the
        unmangled source."""
        lowerCamelCase : int = False
        super().__init__(__A , __A )

    def __call__( self , __A=None , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 2048 , __A = 0 , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ):
        """Encode images and/or text.

        Text-only (non-VQA): tokenize and return directly.  Otherwise run
        the image processor (passing the text as `header_text` for VQA),
        then — for non-VQA — tokenize the text as decoder inputs, renaming
        `input_ids`/`attention_mask` to their decoder_* counterparts.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            lowerCamelCase : str = self.tokenizer
            lowerCamelCase : List[str] = self.tokenizer(
                text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            lowerCamelCase : List[str] = self.image_processor(
                __A , return_tensors=__A , max_patches=__A , **__A )
        else:
            # add pixel_values and bbox
            lowerCamelCase : int = self.image_processor(
                __A , return_tensors=__A , max_patches=__A , header_text=__A , **__A )
        if text is not None and not self.image_processor.is_vqa:
            lowerCamelCase : Tuple = self.tokenizer(
                text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
            # Rename tokenizer outputs to decoder inputs for the seq2seq model.
            if "attention_mask" in text_encoding:
                lowerCamelCase : int = text_encoding.pop("attention_mask" )
            if "input_ids" in text_encoding:
                lowerCamelCase : Optional[Any] = text_encoding.pop("input_ids" )
        else:
            lowerCamelCase : Dict = None
        if text_encoding is not None:
            encoding_image_processor.update(__A )
        return encoding_image_processor

    def _snake_case ( self , *__A , **__A ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*__A , **__A )

    def _snake_case ( self , *__A , **__A ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*__A , **__A )

    @property
    def _snake_case ( self ):
        """Union of tokenizer and image-processor input names, order-preserving
        and de-duplicated via dict.fromkeys."""
        lowerCamelCase : Any = self.tokenizer.model_input_names
        lowerCamelCase : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
283
# Benchmark/demo script: run Stable Diffusion on CPU with Intel Extension
# for PyTorch (IPEX) graph/bf16 optimizations applied to each submodule.
#
# NOTE(review): identifiers are mangled — everything binds to `_snake_case`
# (each assignment clobbering the last) while later lines read the original
# names (`parser`, `args`, `pipe`, `prompt`, `generator`, `image`, ...), and
# `torch.bfloataa` was presumably `torch.bfloat16`.  Also `9_99`, `7_68`,
# `6_66` are just 999/768/666 with digit-grouping underscores.  As written
# the script raises NameError; restore names before running.
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

_snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
_snake_case = parser.parse_args()

_snake_case = '''cpu'''
_snake_case = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
_snake_case = '''path-to-your-trained-model'''
_snake_case = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    _snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_snake_case = pipe.to(device)

# to channels last — IPEX prefers NHWC memory layout on CPU
_snake_case = pipe.unet.to(memory_format=torch.channels_last)
_snake_case = pipe.vae.to(memory_format=torch.channels_last)
_snake_case = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    _snake_case = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex — a sample input lets IPEX trace the UNet; fall back
# to optimization without a sample if tracing fails
_snake_case = torch.randn(2, 4, 64, 64)
_snake_case = torch.rand(1) * 9_99
_snake_case = torch.randn(2, 77, 7_68)
_snake_case = (sample, timestep, encoder_hidden_status)
try:
    _snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    _snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    _snake_case = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)

# compute — fixed seed for reproducibility
_snake_case = 6_66
_snake_case = torch.Generator(device).manual_seed(seed)
_snake_case = {'''generator''': generator}
if args.steps is not None:
    _snake_case = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    _snake_case = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('''generated.png''')
283
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class UpperCAmelCase_(BaseImageProcessor):
    """Image processor: shortest-edge resize, center crop, rescale and normalize.

    NOTE(review): the original definition reused the parameter name `__A` for every
    argument (a SyntaxError) and named every method `_snake_case` (later defs shadow
    earlier ones); real parameter/method names are restored here, behavior unchanged.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: resize shortest edge to 256 (aspect preserved), then 224x224 crop.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch into a BatchFeature of pixel_values.

        Per-call arguments override the processor defaults set in `__init__`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
283
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __A , ): """simple docstring""" lowerCamelCase : str = parent lowerCamelCase : Union[str, Any] = 13 lowerCamelCase : Optional[Any] = 7 lowerCamelCase : List[str] = True lowerCamelCase : Optional[int] = True lowerCamelCase : Union[str, Any] = True lowerCamelCase : List[Any] = True lowerCamelCase : Tuple = True lowerCamelCase : Any = False lowerCamelCase : int = False lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = 99 lowerCamelCase : Tuple = 0 lowerCamelCase : Any = 32 lowerCamelCase : List[Any] = 2 lowerCamelCase : Tuple = 4 lowerCamelCase : List[str] = 0.1 lowerCamelCase : int = 0.1 lowerCamelCase : int = 512 lowerCamelCase : List[Any] = 16 lowerCamelCase : Any = 2 lowerCamelCase : Any = 0.02 lowerCamelCase : List[str] = 3 lowerCamelCase : Tuple = 4 lowerCamelCase : int = "last" lowerCamelCase : int = True lowerCamelCase : Dict = None lowerCamelCase : Tuple = 0 def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) lowerCamelCase : Tuple = None if 
self.use_input_lengths: lowerCamelCase : Optional[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase : str = None if self.use_token_type_ids: lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCamelCase : Dict = None lowerCamelCase : Dict = None lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A ) lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowerCamelCase : Dict = model(__A ) lowerCamelCase : Any = [input_ids, input_mask] lowerCamelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A ) lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowerCamelCase : int = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A ) lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A ) lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A ) lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Any = self.num_choices lowerCamelCase : Optional[Any] = 
TFFlaubertForMultipleChoice(config=__A ) lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : Optional[int] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Optional[Any] = config_and_inputs lowerCamelCase : List[Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' __A : str = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) __A : Dict = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __A : Any = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) __A : List[str] = False __A : 
List[str] = False def _snake_case ( self , __A , __A , __A , __A , __A ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = TFFlaubertModelTester(self ) lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A ) @slow def _snake_case ( self ): """simple docstring""" for model_name in 
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_tf @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) lowerCamelCase : str = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" lowerCamelCase : Dict = model(__A )[0] lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __A ) # compare the actual values for a slice. lowerCamelCase : Tuple = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
283
1
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): '''simple docstring''' @register_to_config def __init__( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A = False , ): """simple docstring""" super().__init__() lowerCamelCase : Optional[Any] = nn.Embedding(__A , __A ) lowerCamelCase : Tuple = nn.Embedding(__A , __A ) lowerCamelCase : Union[str, Any] = False lowerCamelCase : Any = nn.Dropout(p=__A ) lowerCamelCase : int = TaConfig( vocab_size=__A , d_model=__A , num_heads=__A , d_kv=__A , d_ff=__A , dropout_rate=__A , feed_forward_proj=__A , is_decoder=__A , is_encoder_decoder=__A , ) lowerCamelCase : Optional[int] = nn.ModuleList() for lyr_num in range(__A ): lowerCamelCase : str = TaBlock(__A ) self.encoders.append(__A ) lowerCamelCase : str = TaLayerNorm(__A ) lowerCamelCase : Tuple = nn.Dropout(p=__A ) def _snake_case ( self , __A , __A ): """simple docstring""" lowerCamelCase : List[Any] = self.token_embedder(__A ) lowerCamelCase : Optional[int] = encoder_input_tokens.shape[1] lowerCamelCase : List[Any] = torch.arange(__A , device=encoder_input_tokens.device ) x += self.position_encoding(__A ) lowerCamelCase : int = self.dropout_pre(__A ) # inverted the attention mask lowerCamelCase : Any = encoder_input_tokens.size() lowerCamelCase : Optional[Any] = self.get_extended_attention_mask(__A , __A ) for lyr in self.encoders: lowerCamelCase : str = lyr(__A , __A )[0] lowerCamelCase : Dict = self.layer_norm(__A ) return self.dropout_post(__A ), encoder_inputs_mask
283
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


# NOTE(review): the original reused one obfuscated name for every function and
# parameter (duplicate `SCREAMING_SNAKE_CASE_` args are a SyntaxError, and the
# `__main__` guard called an undefined `main`); upstream identifiers restored.


def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build a 0-255 alpha mask with linear ramps on the overlapping borders.

    Borders listed in `remove_borders` ("l"/"r"/"t"/"b") are kept fully opaque.
    """
    remove_borders = [] if remove_borders is None else remove_borders  # avoid mutable default
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """Clamp n into [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_coords, max_coords):
    """Clamp an (x0, y0, x1, y1) rect into the given coordinate bounds."""
    return (
        clamp(rect[0], min_coords[0], max_coords[0]),
        clamp(rect[1], min_coords[1], max_coords[1]),
        clamp(rect[2], min_coords[0], max_coords[0]),
        clamp(rect[3], min_coords[1], max_coords[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """Grow a rect by `overlap` pixels on every side, clamped to the image."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a slice of the original image to the left of the tile."""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """Drop the (upscaled) original-image slice that squeeze_tile prepended."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """Largest multiple of d that is <= n."""
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """x4 upscaling pipeline that processes the image tile by tile with blending."""

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale one tile and paste it (alpha-blended) into final_image."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Position of the context slice taken from the original image.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        """Upscale `image` x4 by running the parent pipeline on each tile.

        `callback`, if given, receives {"progress": float, "image": PIL.Image}
        after every tile.
        """
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    """Demo: upscale the diffusers library logo, saving progress snapshots."""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"""progress: {obj['progress']:.4f}""")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
283
1
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Dict = ["image_processor", "tokenizer"] __A : Dict = "BridgeTowerImageProcessor" __A : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , __A , __A ): """simple docstring""" super().__init__(__A , __A ) def __call__( self , __A , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ): """simple docstring""" lowerCamelCase : str = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) # add pixel_values + pixel_mask lowerCamelCase : int = self.image_processor( __A , return_tensors=__A , do_normalize=__A , do_center_crop=__A , **__A ) encoding.update(__A ) return encoding def _snake_case ( self , *__A , **__A ): """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self , *__A , **__A ): """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[Any] = self.tokenizer.model_input_names lowerCamelCase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
283
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    """Configuration for MobileNetV2 models.

    NOTE(review): the original defined both classes in this file under the same name
    `UpperCAmelCase_` (the second shadowed the first) with duplicate `__A` parameters
    (a SyntaxError); upstream names are restored, defaults unchanged.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
283
1
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class UpperCAmelCase_(ProcessorMixin):
    """Combines a CLIP image processor and an XLM-Roberta tokenizer in one processor.

    NOTE(review): the original reused `__A` for every parameter (a SyntaxError); in
    `__call__` the text+images branch assigned pixel_values to a throwaway local
    instead of `encoding["pixel_values"]`, and the images-only branch wrapped the
    kwargs instead of the image features. Restored to the upstream behavior.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Encode text and/or images; at least one of the two must be given."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
283
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299792458

# Symbolic coordinates of the four-vector (ct, x, y, z)
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v/c for a velocity in m/s.

    Raises ValueError if the speed exceeds c or is below 1 m/s.
    (Original defect: all four functions below were defined under the single
    name ``lowercase_`` while being called as ``beta``/``gamma``/
    ``transformation_matrix``/``transform`` — every call was a NameError.)
    """
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the Lorentz boost to `event` (defaults to the symbolic vector).

    When an explicit event is given, its first component is scaled by c
    in place (x0 = ct), mutating the caller's array — original behavior.
    """
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
283
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class UpperCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a NAT (Neighborhood Attention Transformer) model.

    Fixes: the original ``__init__`` named every one of its 17 parameters
    ``__A`` (duplicate argument names — a SyntaxError) and its bases were
    the undefined name ``UpperCamelCase``; parameter names below follow the
    shown defaults and the attributes the body assigns.
    """

    model_type = "nat"

    # Map generic transformer attribute names onto NAT-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
283
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor

logger = logging.get_logger(__name__)


class UpperCAmelCase_(DPTImageProcessor):
    """Deprecated alias for DPTImageProcessor; warns on construction.

    Fixes: the original signature ``(self, *__A, **__A)`` reused the name
    ``__A`` (SyntaxError), and the base was the undefined ``UpperCamelCase``
    — ``DPTImageProcessor`` is the class this shim forwards to.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
283
1
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase_(SchedulerCommonTest):
    """Unit tests for DDPMScheduler.

    Fixes: all fifteen test methods were named ``_snake_case`` — each
    definition shadowed the previous one and none was discoverable by
    unittest — and several call sites used the undefined placeholder
    ``__A`` where a value (True, ValueError, ...) belonged. Method names
    below are restored from each body's intent.
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDPM config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Spot-check the posterior variance at the ends and the middle.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        timesteps = scheduler.timesteps

        # previous_timestep must walk the custom schedule, ending at -1.
        for i, timestep in enumerate(timesteps):
            if i == len(timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
283
import argparse _snake_case = '''docs/source/_static/js/custom.js''' def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' with open(SCREAMING_SNAKE_CASE_ , encoding="utf-8" , newline="\n" ) as f: lowerCamelCase : List[str] = f.readlines() lowerCamelCase : int = 0 # First let's put the right version while not lines[index].startswith("const stableVersion =" ): index += 1 lowerCamelCase : str = f"""const stableVersion = \"v{version}\"\n""" # Then update the dictionary while not lines[index].startswith("const versionMapping = {" ): index += 1 # We go until the end while not lines[index].startswith("}" ): index += 1 # We add the new version at the end lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n""" with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') _snake_case = parser.parse_args() update_custom_js(args.version)
283
1
def least_divisible_repunit(divisor: int) -> int:
    """Return the smallest k such that the repunit R(k) = 111...1 (k ones)
    is divisible by `divisor`, or 0 if no repunit is (divisor shares a
    factor with 10).

    Fixes: originally defined as ``lowercase_`` while ``solution`` called
    ``least_divisible_repunit`` — a NameError.
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1  # R(1) mod divisor
    repunit_index = 1
    while repunit:
        # R(k+1) = 10 * R(k) + 1, tracked mod divisor to stay small.
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Project Euler 129: the least odd divisor d (coprime to 10) with
    A(d) > `limit`, using A(d) <= d so the search can start near `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
283
from ..utils import DummyObject, requires_backends


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder raised when flax is not installed.

    NOTE(review): the original chunk defined thirteen byte-identical classes
    all named ``UpperCAmelCase_`` (their real, distinct names were lost to
    obfuscation), so only the last binding survived; each also used the
    duplicate parameter name ``__A`` for ``*args``/``**kwargs`` — a
    SyntaxError. A single class preserves the module's observable surface;
    restore the real per-model names when they can be recovered.
    """

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
283
1
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset

# num_image_embeds -> (rows, cols) pooling grid; referenced below by name,
# so the original binding to ``_snake_case`` was a NameError.
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    """ResNet backbone that pools an image into `num_image_embeds` vectors.

    NOTE(review): both classes in this chunk were obfuscated to the same
    name ``UpperCAmelCase_`` (the second shadowed the first); names here are
    restored from the transformers MM-IMDB example — confirm against callers.
    """

    def __init__(self, args):
        super().__init__()
        # ``torchvision.models.resnetaaa`` / ``nn.AdaptiveAvgPoolad`` in the
        # original are mangled; resnet152 / AdaptiveAvgPool2d presumed — TODO confirm.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]  # drop avgpool + fc head
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3xHxW -> Bx2048xgrid -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out


class JsonlDataset(Dataset):
    """Dataset over a JSON-lines file with `text`, `img` and `label` fields."""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        # Split off the special tokens so the image embeds can be spliced in.
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        # Multi-hot target; the original index expression was destroyed by
        # obfuscation (`... = 1`) — reconstructed from the example source.
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        """Count how often each label occurs across the dataset."""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    """Pad sentences to the batch max length and stack all tensors."""
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    """Return the fixed MM-IMDB genre label set."""
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    """Resize/crop/normalize pipeline matching the pretrained backbone."""
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
283
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class UpperCAmelCase_(PipelineTool):
    """Speech-to-text tool backed by Whisper.

    Fixes: the three pipeline hooks were all obfuscated to ``_snake_case``
    (each shadowing the last); PipelineTool's hook names ``encode`` /
    ``forward`` / ``decode`` are restored from the bodies. Base class was
    the undefined ``UpperCamelCase``; ``PipelineTool`` is the imported base.
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Turn raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Run generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode generated token ids into the transcript string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
283
1
import math


def res(x: int, y: int) -> float:
    """Return a value usable to compare x**y magnitudes via logarithms:
    log10(x**y) = y * log10(x); 0 for x == 0, 1 for y == 0 (x**0 == 1).

    Fixes: originally defined as ``lowercase_`` with parameter
    ``SCREAMING_SNAKE_CASE_`` while the body used ``x``/``y`` and the main
    block called ``res`` — NameErrors throughout.
    """
    if 0 not in (x, y):
        # log10(x^y) = y * log10(x) lets us compare huge powers cheaply.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two (base, power) pairs and report which power is larger.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes
    # two arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
283
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint onto the transformers
    naming scheme (applied replacements are order-sensitive).

    Fixes: all three functions in this module were obfuscated to
    ``lowercase_``; names are restored from the call sites
    (``convert_checkpoint_helper``/``convert_yoso_checkpoint``) and from the
    helper's need to rename keys.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys in-place, drop pooler/classifier heads, and add the
    derived bias / position_ids entries the transformers model expects.

    Fixes: the original dropped the renamed value into a throwaway local
    instead of writing ``orig_state_dict[rename_key(key)] = val``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # position_ids offset by 2, matching the RoBERTa-style padding convention
    # — TODO confirm against YosoEmbeddings.
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load an original YOSO checkpoint, convert it, and save a transformers
    model at `pytorch_dump_path`."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
283
1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

if is_torch_available():
    # Fixes: the original imported the nonexistent name
    # ``AutoModelForSeqaSeqLM`` ("2" mangled to "a") — an ImportError.
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_(unittest.TestCase):
    """Slow integration test pinning mt5-small's loss on a tiny example."""

    @slow
    def test_small_integration_test(self):
        # Fixes: originally named ``_snake_case`` and therefore never
        # discovered by unittest.
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Convert mean per-token NLL into the total-score convention used by
        # the reference (mesh-tf) implementation.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
283
import torch

from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Config for the multilingual CLIP text tower.

    Fixes: both classes in this module were obfuscated to ``UpperCAmelCase_``
    while the model class below references ``MCLIPConfig`` — a NameError.
    Base was the undefined ``UpperCamelCase``; XLMRobertaConfig presumed
    from the imports — TODO confirm.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class UpperCAmelCase_(PreTrainedModel):
    """XLM-Roberta encoder + linear projection into the CLIP image space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected mean-pooled embedding, token embeddings)."""
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mask-aware mean pooling over the sequence dimension.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
283
1
# Lazy-import scaffolding for the DeiT model family. Submodules are only
# imported when one of their public names is actually accessed.
# Fixes: the original bound every structure update to ``_snake_case`` and
# then passed the undefined name ``_import_structure`` to ``_LazyModule``
# (a NameError); the dict-update form below is the standard pattern.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
283
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for OwlViTProcessor (CLIP tokenizer + OwlViT image processor).

    NOTE(review): automated identifier renaming has mangled this chunk.
    Every test method is named ``_snake_case`` (so later defs shadow earlier
    ones and only the last is collected), every local is assigned to the
    throwaway name ``lowerCamelCase`` while later statements read the
    original names (``self.tmpdirname``, ``processor``, ``inputs`` ...), and
    ``__A`` is an unresolved placeholder for the original arguments.
    ``np.uinta`` is presumably ``np.uint8`` — confirm against upstream.
    Restore the original identifiers before relying on these tests.
    """

    def _snake_case ( self ):
        """setUp: create a temp dir holding a toy CLIP vocab/merges pair and an
        image-processor JSON config (written under IMAGE_PROCESSOR_NAME)."""
        lowerCamelCase : str = tempfile.mkdtemp()
        # fmt: off
        lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) )
        lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
        lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(__A ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(__A ) )
        lowerCamelCase : str = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        lowerCamelCase : str = os.path.join(self.tmpdirname , __A )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__A , __A )

    def _snake_case ( self , **__A ):
        """Build a slow CLIPTokenizer from the temp dir (pad token '!')."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__A )

    def _snake_case ( self , **__A ):
        """Build a fast CLIPTokenizerFast from the temp dir (pad token '!')."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A )

    def _snake_case ( self , **__A ):
        """Build an OwlViTImageProcessor from the temp-dir config."""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )

    def _snake_case ( self ):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )

    def _snake_case ( self ):
        """Return a single random 3x30x400 image as a PIL.Image list."""
        lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _snake_case ( self ):
        """save_pretrained/from_pretrained round-trip with slow and fast tokenizers."""
        lowerCamelCase : List[Any] = self.get_tokenizer()
        lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
        lowerCamelCase : Tuple = self.get_image_processor()
        lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        processor_slow.save_pretrained(self.tmpdirname )
        lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
        lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        processor_fast.save_pretrained(self.tmpdirname )
        lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __A )
        self.assertIsInstance(processor_fast.tokenizer , __A )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __A )
        self.assertIsInstance(processor_fast.image_processor , __A )

    def _snake_case ( self ):
        """from_pretrained honours extra tokenizer/image-processor kwargs."""
        lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
        lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __A )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __A )

    def _snake_case ( self ):
        """processor(images=...) matches calling the image processor directly."""
        lowerCamelCase : List[Any] = self.get_image_processor()
        lowerCamelCase : Optional[int] = self.get_tokenizer()
        lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        lowerCamelCase : Tuple = self.prepare_image_inputs()
        lowerCamelCase : int = image_processor(__A , return_tensors="np" )
        lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def _snake_case ( self ):
        """processor(text=...) matches calling the tokenizer directly."""
        lowerCamelCase : Union[str, Any] = self.get_image_processor()
        lowerCamelCase : Dict = self.get_tokenizer()
        lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        lowerCamelCase : Tuple = "lower newer"
        lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
        lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )

    def _snake_case ( self ):
        """processor(text, images) returns the combined key set; empty call raises."""
        lowerCamelCase : Any = self.get_image_processor()
        lowerCamelCase : Any = self.get_tokenizer()
        lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        lowerCamelCase : Optional[Any] = "lower newer"
        lowerCamelCase : Dict = self.prepare_image_inputs()
        lowerCamelCase : Any = processor(text=__A , images=__A )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    def _snake_case ( self ):
        """A flat list of text queries is padded to (n_queries, 16)."""
        lowerCamelCase : Any = "google/owlvit-base-patch32"
        lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
        lowerCamelCase : Tuple = ["cat", "nasa badge"]
        lowerCamelCase : str = processor(text=__A )
        lowerCamelCase : Union[str, Any] = 16
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    def _snake_case ( self ):
        """Nested (per-image) query lists are flattened to batch*max_queries rows."""
        lowerCamelCase : str = "google/owlvit-base-patch32"
        lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
        lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
        lowerCamelCase : int = processor(text=__A )
        lowerCamelCase : Tuple = 16
        lowerCamelCase : Any = len(__A )
        lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    def _snake_case ( self ):
        """Tokenized query ids match known CLIP BPE ids (49406 = BOS, 49407 = EOS)."""
        lowerCamelCase : Dict = "google/owlvit-base-patch32"
        lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
        lowerCamelCase : List[Any] = ["cat", "nasa badge"]
        lowerCamelCase : Optional[Any] = processor(text=__A )
        lowerCamelCase : int = 16
        lowerCamelCase : List[str] = inputs["input_ids"]
        lowerCamelCase : int = [
            [4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )

    def _snake_case ( self ):
        """Image-guided mode: images + query_images yields both pixel-value keys."""
        lowerCamelCase : Any = self.get_image_processor()
        lowerCamelCase : List[str] = self.get_tokenizer()
        lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        lowerCamelCase : Dict = self.prepare_image_inputs()
        lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
        lowerCamelCase : Any = processor(images=__A , query_images=__A )
        self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    def _snake_case ( self ):
        """processor.batch_decode delegates to the tokenizer's batch_decode."""
        lowerCamelCase : Optional[Any] = self.get_image_processor()
        lowerCamelCase : Optional[int] = self.get_tokenizer()
        lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
        lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCamelCase : List[Any] = processor.batch_decode(__A )
        lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
        self.assertListEqual(__A , __A )
283
1
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
_snake_case = logging.getLogger(__name__)
# The function bodies below log through ``logger``; keep both names bound
# (the previous revision only bound ``_snake_case``, leaving ``logger``
# undefined at call time).
logger = _snake_case


def git_log(SCREAMING_SNAKE_CASE_):
    """Dump the current git commit id, sha and branch to ``git_log.json``.

    Args:
        SCREAMING_SNAKE_CASE_: path of the folder the JSON file is written to.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    # Bug fix: the previous revision called json.dump(path, path) — dumping
    # the folder path onto itself instead of the repo info into the file.
    with open(os.path.join(SCREAMING_SNAKE_CASE_, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(SCREAMING_SNAKE_CASE_):
    """Handle single- and multi-GPU / multi-node setups.

    Sets the following attributes on the argument object (the previous
    revision assigned them to throwaway locals, so the later asserts read
    attributes that were never set): local_rank, global_rank, world_size,
    n_nodes, node_id, n_gpu_per_node, is_master, multi_node, multi_gpu
    (plus master_port in CPU-only mode).
    """
    params = SCREAMING_SNAKE_CASE_
    if params.n_gpu <= 0:
        # CPU-only run: behave as a trivial single "master" process.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        # Distributed launch: topology comes from the launcher's environment.
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(SCREAMING_SNAKE_CASE_):
    """Seed numpy and torch (and all CUDA devices when GPUs are used)."""
    args = SCREAMING_SNAKE_CASE_
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


# Backward-compatible alias: the previous revision defined all three helpers
# under the single name ``lowercase_``, so only the last binding (set_seed)
# was externally reachable.
lowercase_ = set_seed
283
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ):
    """Unit tests for BioGptTokenizer (toy BPE vocab + slow integration test).

    NOTE(review): automated identifier renaming has mangled this chunk. The
    mixin base ``UpperCamelCase`` is unresolved (presumably the imported
    TokenizerTesterMixin), every test method is named ``_snake_case`` (later
    defs shadow earlier ones), and locals are assigned to the throwaway name
    ``lowerCamelCase`` while later statements read the original names
    (``self.vocab_file``, ``tokenizer``, ``tokens`` ...); ``__A`` is an
    unresolved placeholder. Restore the original identifiers before relying
    on these tests.
    """

    # Harness configuration: tokenizer class under test / rust-tokenizer flag
    # (attribute names mangled to ``__A``).
    __A : List[Any] = BioGptTokenizer
    __A : Optional[int] = False

    def _snake_case ( self ):
        """setUp: write a toy BPE vocab (JSON) and merges file into the temp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase : Union[str, Any] = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        lowerCamelCase : str = dict(zip(__A , range(len(__A ) ) ) )
        lowerCamelCase : Dict = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(__A ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(__A ) )

    def _snake_case ( self , __A ):
        """Return an (input_text, expected_output_text) pair for the common tests."""
        lowerCamelCase : Dict = "lower newer"
        lowerCamelCase : Union[str, Any] = "lower newer"
        return input_text, output_text

    def _snake_case ( self ):
        """Tokenize 'lower' against the toy vocab and check tokens and ids."""
        lowerCamelCase : List[str] = BioGptTokenizer(self.vocab_file , self.merges_file )
        lowerCamelCase : Optional[int] = "lower"
        lowerCamelCase : Any = ["low", "er</w>"]
        lowerCamelCase : List[str] = tokenizer.tokenize(__A )
        self.assertListEqual(__A , __A )

        lowerCamelCase : Union[str, Any] = tokens + ["<unk>"]
        lowerCamelCase : List[str] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )

    @slow
    def _snake_case ( self ):
        """Integration: special-token layout of single vs. pair inputs with the
        pretrained microsoft/biogpt tokenizer (id 2 is prepended as BOS)."""
        lowerCamelCase : List[str] = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        lowerCamelCase : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=__A )
        lowerCamelCase : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=__A )
        lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__A )
        lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
283
1
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_snake_case = logging.get_logger(__name__)

# Pretrained config archive map (second assignment shadows the logger above —
# both were originally distinct module-level names).
_snake_case = {
    '''huggingface/time-series-transformer-tourism-monthly''': (
        '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class UpperCAmelCase_ ( UpperCamelCase ):
    """Configuration class for a Time Series Transformer model.

    NOTE(review): automated identifier renaming has mangled this chunk. The
    base ``UpperCamelCase`` is unresolved (presumably the imported
    PretrainedConfig); the two ``__A`` class attributes were presumably
    ``model_type`` and ``attribute_map``; and ``__init__`` repeats the
    parameter name ``__A``, which is a SyntaxError in Python — the original
    keyword parameters (prediction_length, context_length, ...) must be
    restored. Inside the body, assignments go to the throwaway local
    ``lowerCamelCase`` while later code reads ``self.cardinality`` /
    ``self._number_of_features``, which are therefore never set. The
    property below was presumably named ``_number_of_features``.
    """

    __A : Union[str, Any] = "time_series_transformer"
    __A : List[Any] = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__( self , __A = None , __A = None , __A = "student_t" , __A = "nll" , __A = 1 , __A = [1, 2, 3, 4, 5, 6, 7] , __A = "mean" , __A = 0 , __A = 0 , __A = 0 , __A = 0 , __A = None , __A = None , __A = 32 , __A = 32 , __A = 2 , __A = 2 , __A = 2 , __A = 2 , __A = True , __A = "gelu" , __A = 64 , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 100 , __A = 0.02 , __A=True , **__A , ):
        """Store time-series settings, then derive the transformer dimensions.

        NOTE(review): the mutable list default ``[1, 2, 3, 4, 5, 6, 7]``
        (presumably ``lags_sequence``) is shared across calls — upstream
        configs usually copy it; confirm before reuse.
        """
        # time series specific configuration
        lowerCamelCase : Union[str, Any] = prediction_length
        # context window defaults to the prediction length when not given
        lowerCamelCase : str = context_length or prediction_length
        lowerCamelCase : Any = distribution_output
        lowerCamelCase : int = loss
        lowerCamelCase : Optional[int] = input_size
        lowerCamelCase : str = num_time_features
        lowerCamelCase : List[Any] = lags_sequence
        lowerCamelCase : Tuple = scaling
        lowerCamelCase : int = num_dynamic_real_features
        lowerCamelCase : Any = num_static_real_features
        lowerCamelCase : Any = num_static_categorical_features
        # one cardinality entry is required per static categorical feature
        if cardinality and num_static_categorical_features > 0:
            if len(__A ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            lowerCamelCase : str = cardinality
        else:
            lowerCamelCase : Optional[int] = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(__A ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            lowerCamelCase : Optional[int] = embedding_dimension
        else:
            # heuristic default embedding width, capped at 50
            lowerCamelCase : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        lowerCamelCase : Union[str, Any] = num_parallel_samples

        # Transformer architecture configuration
        lowerCamelCase : Union[str, Any] = input_size * len(__A ) + self._number_of_features
        lowerCamelCase : int = d_model
        lowerCamelCase : Optional[int] = encoder_attention_heads
        lowerCamelCase : Optional[int] = decoder_attention_heads
        lowerCamelCase : List[str] = encoder_ffn_dim
        lowerCamelCase : Tuple = decoder_ffn_dim
        lowerCamelCase : Any = encoder_layers
        lowerCamelCase : str = decoder_layers
        lowerCamelCase : Optional[Any] = dropout
        lowerCamelCase : Optional[Any] = attention_dropout
        lowerCamelCase : Optional[Any] = activation_dropout
        lowerCamelCase : List[str] = encoder_layerdrop
        lowerCamelCase : int = decoder_layerdrop
        lowerCamelCase : Dict = activation_function
        lowerCamelCase : Dict = init_std
        lowerCamelCase : int = use_cache
        super().__init__(is_encoder_decoder=__A , **__A )

    @property
    def _snake_case ( self ):
        """Width of the per-time-step feature vector fed to the transformer."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
283
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = (10**k - 1) // 9 is
    divisible by *divisor* (Project Euler 129's A(n)).

    Returns 0 when no such k exists, i.e. when *divisor* shares a factor
    with 10 (is divisible by 2 or 5).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    # Track R(k) modulo divisor; R(k+1) = 10 * R(k) + 1.
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd n coprime to 10 with A(n) > *limit*.

    Since A(n) <= n, the search can start just below *limit* and step over
    odd candidates only (even divisors yield A(n) = 0).
    """
    # Bug fix: the previous revision renamed both functions to ``lowercase_``
    # (the second def shadowing the first) while the call sites below still
    # referenced ``least_divisible_repunit`` / ``solution`` — a NameError at
    # runtime. Proper names are restored here.
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


# Backward-compatible alias for the previous revision's only reachable name.
lowercase_ = solution

if __name__ == "__main__":
    print(f'''{solution() = }''')
283
1
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


# Number of bits used to encode each 0-255 channel value.
# NOTE(review): automated renaming has mangled this chunk — this constant was
# presumably named ``BITS`` (the defaults below reference an unresolved
# ``BITS``); locals are assigned to the throwaway ``lowerCamelCase`` while
# later statements read the original names (``mask``, ``bits``, ``sample``,
# ...); several defs repeat a parameter name, which is a SyntaxError; and
# ``UNetaDConditionModel`` / ``torch.intaa`` are presumably
# ``UNet2DConditionModel`` / ``torch.int32``. Restore the original
# identifiers before use.
_snake_case = 8


def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=BITS ):
    """Convert an image tensor with values in [0, 1] into a bit tensor:
    each channel becomes ``bits`` planes scaled to {-1, +1}."""
    lowerCamelCase : Dict = x.device
    # quantize to integer byte values
    lowerCamelCase : Tuple = (x * 255).int().clamp(0 , 255 )
    # per-bit mask: 2^(bits-1) ... 2^0
    lowerCamelCase : Any = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE_ )
    lowerCamelCase : Union[str, Any] = rearrange(SCREAMING_SNAKE_CASE_ , "d -> d 1 1" )
    lowerCamelCase : Dict = rearrange(SCREAMING_SNAKE_CASE_ , "b c h w -> b c 1 h w" )
    # extract each bit plane as 0/1 floats
    lowerCamelCase : List[str] = ((x & mask) != 0).float()
    lowerCamelCase : Dict = rearrange(SCREAMING_SNAKE_CASE_ , "b c d h w -> b (c d) h w" )
    # map {0, 1} -> {-1, +1}
    lowerCamelCase : Optional[int] = bits * 2 - 1
    return bits


def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=BITS ):
    """Inverse of the conversion above: collapse ±1 bit planes back to
    decimal channel values in [0, 1]."""
    lowerCamelCase : Any = x.device
    # threshold back to 0/1 bits
    lowerCamelCase : Any = (x > 0).int()
    lowerCamelCase : Optional[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE_ , dtype=torch.intaa )
    lowerCamelCase : List[Any] = rearrange(SCREAMING_SNAKE_CASE_ , "d -> d 1 1" )
    lowerCamelCase : Any = rearrange(SCREAMING_SNAKE_CASE_ , "b (c d) h w -> b c d h w" , d=8 )
    # weighted sum of bit planes reconstructs the byte value
    lowerCamelCase : Any = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
    return (dec / 255).clamp(0.0 , 1.0 )


def lowercase_( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ):
    """DDIM scheduler step patched for bit diffusion: x_0 is clipped to
    ±``self.bit_scale`` instead of ±1. Bound onto a scheduler as a method
    (hence the explicit ``self``)."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    lowerCamelCase : Union[str, Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    lowerCamelCase : str = self.alphas_cumprod[timestep]
    lowerCamelCase : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    lowerCamelCase : Tuple = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    lowerCamelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0" to the bit scale rather than the usual ±1
    lowerCamelCase : int = self.bit_scale
    if self.config.clip_sample:
        lowerCamelCase : Union[str, Any] = torch.clamp(SCREAMING_SNAKE_CASE_ , -scale , SCREAMING_SNAKE_CASE_ )

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    lowerCamelCase : Optional[int] = self._get_variance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    lowerCamelCase : Any = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        lowerCamelCase : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    lowerCamelCase : int = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    lowerCamelCase : List[Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        lowerCamelCase : List[str] = model_output.device if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else "cpu"
        lowerCamelCase : Dict = torch.randn(model_output.shape , dtype=model_output.dtype , generator=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase : List[str] = self._get_variance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ** 0.5 * eta * noise
        lowerCamelCase : Tuple = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )


def lowercase_( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="epsilon" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ):
    """DDPM scheduler step patched for bit diffusion: x_0 is clipped to
    ±``self.bit_scale``. Bound onto a scheduler as a method."""
    lowerCamelCase : List[Any] = timestep

    # split off the learned variance channels when the model predicts them
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        lowerCamelCase , lowerCamelCase : Any = torch.split(SCREAMING_SNAKE_CASE_ , sample.shape[1] , dim=1 )
    else:
        lowerCamelCase : Tuple = None

    # 1. compute alphas, betas
    lowerCamelCase : List[str] = self.alphas_cumprod[t]
    lowerCamelCase : Union[str, Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one
    lowerCamelCase : int = 1 - alpha_prod_t
    lowerCamelCase : Union[str, Any] = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        lowerCamelCase : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        lowerCamelCase : Union[str, Any] = model_output
    else:
        raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )

    # 3. Clip "predicted x_0" to the bit scale rather than the usual ±1
    lowerCamelCase : Tuple = self.bit_scale
    if self.config.clip_sample:
        lowerCamelCase : List[str] = torch.clamp(SCREAMING_SNAKE_CASE_ , -scale , SCREAMING_SNAKE_CASE_ )

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    lowerCamelCase : Optional[int] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    lowerCamelCase : Optional[Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    lowerCamelCase : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    lowerCamelCase : Optional[int] = 0
    if t > 0:
        lowerCamelCase : int = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=SCREAMING_SNAKE_CASE_ ).to(model_output.device )
        lowerCamelCase : Optional[Any] = (self._get_variance(SCREAMING_SNAKE_CASE_ , predicted_variance=SCREAMING_SNAKE_CASE_ ) ** 0.5) * noise

    lowerCamelCase : int = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )


class UpperCAmelCase_ ( UpperCamelCase ):
    """Bit Diffusion pipeline: runs a UNet in bit space, patching the
    scheduler's ``step`` with the bit-scale-aware variants above.

    NOTE(review): the base ``UpperCamelCase`` is unresolved (presumably
    DiffusionPipeline); ``__init__`` and ``__call__`` repeat the parameter
    name ``__A`` — a SyntaxError — and the bodies read original names
    (``bit_scale``, ``latents``, ``image`` ...) that the mangled assignments
    never bind. Restore the original identifiers before use.
    """

    def __init__( self , __A , __A , __A = 1.0 , ):
        """Store the bit scale and pick the matching patched step function
        based on the scheduler type (DDIM vs DDPM)."""
        super().__init__()
        lowerCamelCase : str = bit_scale
        lowerCamelCase : List[str] = (
            ddim_bit_scheduler_step if isinstance(__A , __A ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=__A , scheduler=__A )

    @torch.no_grad()
    def __call__( self , __A = 256 , __A = 256 , __A = 50 , __A = None , __A = 1 , __A = "pil" , __A = True , **__A , ):
        """Sample images: start from random bit-space latents, denoise with the
        scheduler loop, then convert bits back to decimal pixel values."""
        lowerCamelCase : List[str] = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=__A , )
        # scale initial noise into bit space
        lowerCamelCase : List[Any] = decimal_to_bits(__A ) * self.bit_scale
        lowerCamelCase : Any = latents.to(self.device )

        self.scheduler.set_timesteps(__A )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            lowerCamelCase : Union[str, Any] = self.unet(__A , __A ).sample
            # compute the previous noisy sample x_t -> x_t-1
            lowerCamelCase : Optional[int] = self.scheduler.step(__A , __A , __A ).prev_sample

        lowerCamelCase : Any = bits_to_decimal(__A )

        if output_type == "pil":
            lowerCamelCase : Optional[Any] = self.numpy_to_pil(__A )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__A )
283
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the (old_name, new_name) pairs mapping DiT checkpoint keys to HF BEiT keys.

    Args:
        config: ``BeitConfig`` of the target model (only ``num_hidden_layers`` is read).
        has_lm_head: whether the checkpoint has a masked-image-modeling head
            (mask token + final layernorm) instead of a classification head.
        is_semantic: whether keys carry the ``backbone.`` prefix of semantic checkpoints.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused ``qkv`` projection into separate query/key/value entries, in place.

    The original code popped the fused tensors but then assigned the slices to
    throwaway locals instead of writing them back — the converted state dict
    was left without any attention weights. The slices are now stored under
    the HF BEiT key names.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values (the key projection has no bias in BEiT)
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        # fused weight is laid out as [query; key; value] along dim 0
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to sanity-check vision conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a DiT checkpoint to the HF BEiT format and optionally push it to the hub.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model + processor.
        push_to_hub: whether to also upload both to the ``nielsr`` organization.
    """
    # rvlcdip checkpoints are classifiers; everything else has an LM (MIM) head
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
283
1
from argparse import ArgumentParser

from .env import EnvironmentCommand


def lowercase_():
    """Entry point for the ``diffusers-cli`` tool.

    Builds the top-level parser, registers all sub-commands, dispatches to the
    selected command's ``run`` method, and prints help (exiting with status 1)
    when no sub-command was given.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    # NOTE: the original garbled code clobbered the parser with the subparsers
    # object and then referenced an undefined name; distinct locals fix both.
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


# Backward-compatible alias: the ``__main__`` guard (and external callers)
# refer to this entry point as ``main``, which was previously undefined.
main = lowercase_


if __name__ == "__main__":
    main()
283
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class UpperCAmelCase_(ProcessorMixin):
    """
    Processor wrapping a BridgeTower image processor and a Roberta tokenizer
    into a single object that prepares both text and image inputs.

    The previous version was unimportable: ``__call__`` declared sixteen
    parameters all named ``__A`` (a duplicate-argument SyntaxError), the three
    ProcessorMixin wiring attributes were all named ``__A``, the base class
    name was undefined, and the decode methods were all named ``_snake_case``.
    """

    # names ProcessorMixin uses to wire up the two sub-processors
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images``, returning one merged ``BatchEncoding``.

        All tokenizer keyword arguments are forwarded unchanged; the image
        processor contributes ``pixel_values`` (and ``pixel_mask``) entries.
        """
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of the two sub-processors' input names, de-duplicated in order
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
283
1
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _snake_case = get_logger(__name__) _snake_case = Path(__file__).parent / '''model_card_template.md''' _snake_case = uuida().hex _snake_case = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES _snake_case = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES _snake_case = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def lowercase_( SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' lowerCamelCase : Tuple = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}""" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"""; torch/{_torch_version}""" if is_flax_available(): ua += f"""; jax/{_jax_version}""" ua += f"""; flax/{_flax_version}""" if is_onnx_available(): ua += f"""; onnxruntime/{_onnxruntime_version}""" # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() ) elif 
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): ua += "; " + user_agent return ua def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if token is None: lowerCamelCase : List[str] = HfFolder.get_token() if organization is None: lowerCamelCase : Union[str, Any] = whoami(SCREAMING_SNAKE_CASE_ )["name"] return f"""{username}/{model_id}""" else: return f"""{organization}/{model_id}""" def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." ) if hasattr(SCREAMING_SNAKE_CASE_ , "local_rank" ) and args.local_rank not in [-1, 0]: return lowerCamelCase : Optional[int] = args.hub_token if hasattr(SCREAMING_SNAKE_CASE_ , "hub_token" ) else None lowerCamelCase : Dict = get_full_repo_name(SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) lowerCamelCase : str = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , repo_name=SCREAMING_SNAKE_CASE_ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE_ , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE_ , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE_ , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE_ , "adam_beta2" ) else None , 
adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE_ , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE_ , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE_ , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE_ , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE_ , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE_ , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE_ , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) lowerCamelCase : Dict = os.path.join(args.output_dir , "README.md" ) model_card.save(SCREAMING_SNAKE_CASE_ ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash lowerCamelCase : str = str(Path(SCREAMING_SNAKE_CASE_ ).as_posix() ) lowerCamelCase : Optional[int] = re.search(r"snapshots/([^/]+)/" , SCREAMING_SNAKE_CASE_ ) if search is None: return None lowerCamelCase : str = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE_ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_snake_case = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) _snake_case = os.path.join(hf_cache_home, '''diffusers''') def lowercase_( SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if new_cache_dir is None: lowerCamelCase : List[Any] = DIFFUSERS_CACHE if old_cache_dir is None: lowerCamelCase : Any = old_diffusers_cache lowerCamelCase : Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).expanduser() lowerCamelCase : str = Path(SCREAMING_SNAKE_CASE_ ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): lowerCamelCase : Dict = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE_ ) new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) os.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) try: os.symlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _snake_case = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): _snake_case = 0 else: with open(cache_version_file) as f: try: _snake_case = int(f.read()) except ValueError: _snake_case = 0 if cache_version < 1: _snake_case = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. 
This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: _snake_case = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' '''the directory exists and can be written to.''' ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if variant is not None: lowerCamelCase : Optional[int] = weights_name.split("." 
) lowerCamelCase : Optional[int] = splits[:-1] + [variant] + splits[-1:] lowerCamelCase : Optional[Any] = ".".join(SCREAMING_SNAKE_CASE_ ) return weights_name def lowercase_( SCREAMING_SNAKE_CASE_ , *, SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , ): '''simple docstring''' lowerCamelCase : str = str(SCREAMING_SNAKE_CASE_ ) if os.path.isfile(SCREAMING_SNAKE_CASE_ ): return pretrained_model_name_or_path elif os.path.isdir(SCREAMING_SNAKE_CASE_ ): if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): # Load from a PyTorch checkpoint lowerCamelCase : int = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): lowerCamelCase : str = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return model_file else: raise EnvironmentError( f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" ) else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse("0.20.0" ) ): try: lowerCamelCase : List[str] = hf_hub_download( SCREAMING_SNAKE_CASE_ , filename=_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , ) warnings.warn( f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , SCREAMING_SNAKE_CASE_ , ) return model_file except: # noqa: E722 warnings.warn( f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}' so that the correct variant file can be added.""" , SCREAMING_SNAKE_CASE_ , ) try: # 2. 
Load model file as usual lowerCamelCase : Any = hf_hub_download( SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , user_agent=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """ "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """ "this model name. Check the model page at " f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" ) except EntryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" ) except HTTPError as err: raise EnvironmentError( f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" ) except ValueError: raise EnvironmentError( f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it""" f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a""" f""" directory containing a file named {weights_name} or""" " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f"""Can't load the model for '{pretrained_model_name_or_path}'. 
If you were trying to load it from """ "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """ f"""containing a file named {weights_name}""" )
283
def lowercase_(unsorted):
    """Sort ``unsorted`` in place with cocktail-shaker sort and return it.

    The previous version assigned each swap to two throwaway local variables
    instead of the list slots, so the list was never actually reordered.

    Args:
        unsorted: a mutable sequence of mutually comparable items.

    Returns:
        The same sequence object, sorted ascending.

    >>> lowercase_([4, 5, 2, 1, 3])
    [1, 2, 3, 4, 5]
    >>> lowercase_([])
    []
    """
    for upper in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: sink smaller elements toward the front.
        for j in range(upper, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # Forward pass: float larger elements toward the back.
        for j in range(upper):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            # Already sorted; no pass made a change.
            break
    return unsorted


# Backward-compatible alias: the demo below referred to this name,
# which was previously undefined.
cocktail_shaker_sort = lowercase_


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
283
1
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _snake_case = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize _snake_case = '''\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ''' _snake_case = '''\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. ''' _snake_case = ''' Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. 
Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: \'meteor\': meteor score. Examples: >>> meteor = datasets.load_metric(\'meteor\') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results["meteor"], 4)) 0.6944 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _snake_case ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[ "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score", "https://en.wikipedia.org/wiki/METEOR", ] , ) def _snake_case ( self , __A ): """simple docstring""" import nltk nltk.download("wordnet" ) if NLTK_VERSION >= version.Version("3.6.5" ): nltk.download("punkt" ) if NLTK_VERSION >= version.Version("3.6.6" ): nltk.download("omw-1.4" ) def _snake_case ( self , __A , __A , __A=0.9 , __A=3 , __A=0.5 ): """simple docstring""" if NLTK_VERSION >= version.Version("3.6.5" ): lowerCamelCase : List[Any] = [ 
meteor_score.single_meteor_score( word_tokenize(__A ) , word_tokenize(__A ) , alpha=__A , beta=__A , gamma=__A ) for ref, pred in zip(__A , __A ) ] else: lowerCamelCase : Dict = [ meteor_score.single_meteor_score(__A , __A , alpha=__A , beta=__A , gamma=__A ) for ref, pred in zip(__A , __A ) ] return {"meteor": np.mean(__A )}
283
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)

enable_full_determinism()


class UpperCAmelCase_(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast tests for StableUnCLIPPipeline built from tiny randomly-initialized components."""

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Return the full dict of tiny prior + denoiser components the pipeline needs."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embeddings are the noise-augmented image embeddings concatenated
            # with their noise levels, hence twice the projection dim.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return minimal pipeline call kwargs with a device-appropriate generator."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Only compare exact outputs on CPU, where determinism is reliable.
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real fusing/stable-unclip-2-1-l checkpoint.

    Renamed from the duplicate ``UpperCAmelCase_`` which shadowed the fast test class above.
    """

    def tearDown(self):
        # Clean up CUDA memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
283
1
# Lazy import structure for the Electra model family: framework-specific modules
# are only imported on first attribute access via _LazyModule.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Map of submodule name -> public names it provides. The original code rebound a
# placeholder variable per branch and referenced an undefined `_import_structure`.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
# Lazy import structure for SqueezeBERT. The original code rebound a single
# `_snake_case` placeholder per branch and then referenced an undefined
# `_import_structure`; restore the canonical pattern.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
283
0
"""Tests for the LeViT image processor."""

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build a LevitImageProcessor in the tests below.

    Restored this class's name — the mangled source named both classes ``__A``,
    so the reference to ``LevitImageProcessingTester`` in the test class raised
    a NameError.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # NOTE: list defaults are read-only here, never mutated
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """End-to-end checks for LevitImageProcessor on PIL, numpy and torch inputs."""

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # Intentionally empty placeholder, kept from the original.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a Decision Transformer (GPT-2-style backbone over RL trajectories).

    The original ``__init__`` used a duplicate placeholder parameter name (a
    SyntaxError) and bound every value to a local instead of ``self``; both are
    restored here. The base class is the imported ``PretrainedConfig``.
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
283
0
"""Accelerate example: GLUE/MRPC fine-tuning that survives out-of-memory errors."""

import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size

########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in its various modes, follow the instructions in the readme:
# https://github.com/huggingface/accelerate/tree/main/examples
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Tokenize GLUE/MRPC and return (train_dataloader, eval_dataloader)."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        return tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first so the tokenization cache is shared:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels
    # by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train BERT on MRPC, retrying with smaller batch sizes on CUDA OOM."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # The inner training loop takes a batch size as its only parameter so that
    # `find_executable_batch_size` can retry it with smaller values on OOM.
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also controls
        # new weight initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # Placing the model on the device before the optimizer is created is required
        # for TPU training to work.
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything; there is no specific order to remember, just unpack in the
        # same order the objects were given to `prepare`.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
2
def lowercase_(SCREAMING_SNAKE_CASE_=4000000):
    """Return the sum of even Fibonacci numbers not exceeding the given limit.

    Project Euler problem 2. The parameter name and default are kept for
    backward compatibility; it is the inclusive upper bound. The original body
    referenced variables (`fib`, `i`, `n`) whose bindings had been renamed
    away, raising NameError on every call.
    """
    limit = SCREAMING_SNAKE_CASE_
    total = 0
    a, b = 0, 1
    # Walk the Fibonacci sequence, accumulating the even terms up to `limit`.
    while a <= limit:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total


if __name__ == "__main__":
    # The original guard called an undefined `solution()`; call the function
    # defined in this module instead.
    print(f"{lowercase_() = }")
283
0
"""Tests for FlaxAutoModel: pretrained loading, jit-compatibility and error messages."""

import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow

if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class A(unittest.TestCase):
    # The mangled source named every method `__lowerCAmelCase`, so only the last
    # one survived; distinct names are restored so all tests run.

    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            # Named `infer` (not `eval`) to avoid shadowing the builtin.
            @jax.jit
            def infer(**kwargs):
                return model(**kwargs)

            infer(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def infer(**kwargs):
                return model(**kwargs)

            infer(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
3
# Lazy import structure for VisionEncoderDecoder. The original code rebound a
# single `_snake_case` placeholder per branch (each overwriting the last) and
# then referenced an undefined `_import_structure`; restore the canonical pattern.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
283
0
"""Configuration for the MVP model."""

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class UpperCAmelCase_(PretrainedConfig):
    """MVP (BART-style encoder-decoder with optional prompt tuning) configuration.

    The original code bound every value to a local ``lowerCAmelCase`` instead of
    ``self`` and subclassed an undefined name; the base is the imported
    ``PretrainedConfig``.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy escape hatch kept from BART-style configs.
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                'The config can simply be saved and uploaded again to be fixed.'
            )
4
"""Stable Diffusion CPU inference with Intel Extension for PyTorch (IPEX).

NOTE(review): de-obfuscated — the original script's assignment targets had all
been collapsed to `_snake_case`, leaving `parser`, `args`, `pipe`, `generator`,
`image`, ... undefined at their use sites; names are restored from those use
sites.  `torch.bfloataa` (nonexistent) is fixed to `torch.bfloat16`, matching
the bfloat16 autocast below.
"""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()

device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''

model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
# Dummy UNet inputs: latents (2, 4, 64, 64), a timestep, CLIP hidden states
# (2, 77, 768) -- used only to let IPEX trace/fuse the graph.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    # Newer IPEX versions accept a sample input for graph optimization.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('''generated.png''')
283
0
from manim import *


# NOTE(review): machine-obfuscated residue of a Manim scene (it animates CPU /
# GPU / Disk memory cells and a model's layers, in the style of the
# `accelerate` "big model inference" documentation animation).  The
# obfuscation destroyed it: every assignment target became `_lowercase` and
# most references became the undefined name `UpperCAmelCase`, so the real
# variable names (and the manim direction constants such as UP/DOWN/RIGHT that
# `UpperCAmelCase` replaced) are unrecoverable from this file alone.  As
# written it raises NameError on `lowerCAmelCase` (presumably `Scene`),
# `UpperCAmelCase`, `mem`, `meta_mem`, `fill`, etc.  Code is preserved
# token-for-token below (only reformatted and commented); do not "fix" names
# without the original source.  Also note `input` shadows the builtin.
class lowerCamelCase__ ( lowerCAmelCase):

    # Original method name lost; `-> List[str]` is obfuscator noise, the
    # method is a Scene.construct-style builder returning nothing meaningful.
    def __A (self ) -> List[str]:
        # Base rectangles: a memory cell, its borderless fill, and a small cell.
        _lowercase =Rectangle(height=0.5 , width=0.5 )
        _lowercase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        _lowercase =Rectangle(height=0.25 , width=0.25 )
        # CPU block: two columns of 6 cells plus a label, placed left of center.
        _lowercase =[mem.copy() for i in range(6 )]
        _lowercase =[mem.copy() for i in range(6 )]
        _lowercase =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        _lowercase =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        _lowercase =VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        _lowercase =Text('''CPU''' , font_size=2_4 )
        _lowercase =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCAmelCase )
        # GPU block: 4 cells plus a label.
        _lowercase =[mem.copy() for i in range(4 )]
        _lowercase =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        _lowercase =Text('''GPU''' , font_size=2_4 )
        _lowercase =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(UpperCAmelCase )
        # Model block: 6 cells representing the model's layers.
        _lowercase =[mem.copy() for i in range(6 )]
        _lowercase =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        _lowercase =Text('''Model''' , font_size=2_4 )
        _lowercase =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(UpperCAmelCase )
        # Filled overlays for each model layer (`model_arr`) and their CPU
        # shadow copies (`model_cpu_arr`) -- those two names survive in reads.
        _lowercase =[]
        _lowercase =[]
        for i, rect in enumerate(UpperCAmelCase ):
            _lowercase =fill.copy().set_fill(UpperCAmelCase , opacity=0.8 )
            target.move_to(UpperCAmelCase )
            model_arr.append(UpperCAmelCase )
            _lowercase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(UpperCAmelCase )
        self.add(*UpperCAmelCase , *UpperCAmelCase )
        # Disk block: two columns of 6 small "meta" cells plus a label.
        _lowercase =[meta_mem.copy() for i in range(6 )]
        _lowercase =[meta_mem.copy() for i in range(6 )]
        _lowercase =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        _lowercase =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        _lowercase =VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        _lowercase =Text('''Disk''' , font_size=2_4 )
        _lowercase =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        disk.move_to([-4, -1.25, 0] )
        self.add(UpperCAmelCase , UpperCAmelCase )
        # Legend in the top-left corner.
        _lowercase =Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _lowercase =MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(UpperCAmelCase , UpperCAmelCase )
        _lowercase =MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
        blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(UpperCAmelCase )
        # Caption for the first animation step.
        _lowercase =MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase ) )
        # The moving "input" token (a small square).  Shadows builtin `input`.
        _lowercase =Square(0.3 )
        input.set_fill(UpperCAmelCase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , UpperCAmelCase , buff=0.5 )
        self.play(Write(UpperCAmelCase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=UpperCAmelCase , buff=0.02 )
        self.play(MoveToTarget(UpperCAmelCase ) )
        self.play(FadeOut(UpperCAmelCase ) )
        # Arrow marking the currently-active layer.
        _lowercase =Arrow(start=UpperCAmelCase , end=UpperCAmelCase , color=UpperCAmelCase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , UpperCAmelCase , buff=0.2 )
        # First layer's weights move CPU -> GPU.
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        _lowercase =MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase , run_time=3 ) )
        # Shared kwargs for the Circumscribe highlight animations.
        _lowercase ={'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
        self.play(
            Write(UpperCAmelCase ) ,
            Circumscribe(model_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) ,
            Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) ,
            Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        _lowercase =a.copy()
        # Walk the input across all 6 layers, swapping each layer's weights
        # onto the GPU as it becomes active and back to CPU afterwards.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            _lowercase =AnimationGroup(
                FadeOut(UpperCAmelCase , run_time=0.5 ) , MoveToTarget(UpperCAmelCase , run_time=0.5 ) , FadeIn(UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(UpperCAmelCase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                # Prefetch the next layer onto the GPU.
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    _lowercase =0.7
                self.play(
                    Circumscribe(model_arr[i] , **UpperCAmelCase ) ,
                    Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase ) ,
                    Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) ,
                    Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) ,
                    Circumscribe(model_arr[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # Last layer: park weights back on CPU, move input to the end.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=UpperCAmelCase , **UpperCAmelCase ) ,
                    Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase , **UpperCAmelCase ) ,
                    Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        _lowercase =a_c
        _lowercase =a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(UpperCAmelCase ) , FadeOut(UpperCAmelCase , run_time=0.5 ) , )
        # Closing caption.
        _lowercase =MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase , run_time=3 ) , MoveToTarget(UpperCAmelCase ) )
        self.wait()
5
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __A , ): """simple docstring""" lowerCamelCase : str = parent lowerCamelCase : Union[str, Any] = 13 lowerCamelCase : Optional[Any] = 7 lowerCamelCase : List[str] = True lowerCamelCase : Optional[int] = True lowerCamelCase : Union[str, Any] = True lowerCamelCase : List[Any] = True lowerCamelCase : Tuple = True lowerCamelCase : Any = False lowerCamelCase : int = False lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = 99 lowerCamelCase : Tuple = 0 lowerCamelCase : Any = 32 lowerCamelCase : List[Any] = 2 lowerCamelCase : Tuple = 4 lowerCamelCase : List[str] = 0.1 lowerCamelCase : int = 0.1 lowerCamelCase : int = 512 lowerCamelCase : List[Any] = 16 lowerCamelCase : Any = 2 lowerCamelCase : Any = 0.02 lowerCamelCase : List[str] = 3 lowerCamelCase : Tuple = 4 lowerCamelCase : int = "last" lowerCamelCase : int = True lowerCamelCase : Dict = None lowerCamelCase : Tuple = 0 def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) lowerCamelCase : Tuple = None if 
self.use_input_lengths: lowerCamelCase : Optional[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase : str = None if self.use_token_type_ids: lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCamelCase : Dict = None lowerCamelCase : Dict = None lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A ) lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowerCamelCase : Dict = model(__A ) lowerCamelCase : Any = [input_ids, input_mask] lowerCamelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A ) lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowerCamelCase : int = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A ) lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A ) lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A ) lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Any = self.num_choices lowerCamelCase : Optional[Any] = 
TFFlaubertForMultipleChoice(config=__A ) lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : Optional[int] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Optional[Any] = config_and_inputs lowerCamelCase : List[Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' __A : str = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) __A : Dict = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __A : Any = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) __A : List[str] = False __A : 
List[str] = False def _snake_case ( self , __A , __A , __A , __A , __A ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = TFFlaubertModelTester(self ) lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A ) @slow def _snake_case ( self ): """simple docstring""" for model_name in 
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_tf @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) lowerCamelCase : str = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" lowerCamelCase : Dict = model(__A )[0] lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __A ) # compare the actual values for a slice. lowerCamelCase : Tuple = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
283
0
"""Tests for the ChineseCLIP image processor.

NOTE(review): de-obfuscated.  The residue named every class `__A` and every
method `SCREAMING_SNAKE_CASE_` (so same-named test methods silently shadowed
each other and `unittest` could not discover any of them), referenced the
undefined base `a` instead of the imported `ImageProcessingSavingTestMixin`,
referenced `ChineseCLIPImageProcessingTester` which was never defined, used
the nonexistent dtype `np.uinta` (restored to `np.uint8`), and dropped the
`expected_encoded_image_num_channels` assignment that the four-channel test
reads.  Names are reconstructed from the intact call sites in this block.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Holds image-processor hyperparameters and builds random image inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random channel-first images.

        Returns PIL images by default, numpy arrays if `numpify`, or torch
        tensors if `torchify` (the two are mutually exclusive).
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted, so the encoded output still has 3 channels.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
6
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build a uint8 alpha mask of *size* that ramps from 0 at the borders to 255
    inside, over a band of ``overlap_pixels``; edges listed in *remove_borders*
    ("l", "r", "t", "b") get no ramp (fully opaque up to that edge)."""
    if remove_borders is None:  # avoid mutable default argument
        remove_borders = []
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    # linear_ramp pads each side fading from 255 down to 0
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    """Clamp *n* into the inclusive range [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_xy, max_xy):
    """Clamp an (x0, y0, x1, y1) rectangle into the bounds given by *min_xy*/*max_xy*."""
    return (
        clamp(rect[0], min_xy[0], max_xy[0]),
        clamp(rect[1], min_xy[1], max_xy[1]),
        clamp(rect[2], min_xy[0], max_xy[0]),
        clamp(rect[3], min_xy[1], max_xy[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """Grow *rect* by *overlap* pixels on every side, clamped to the image bounds."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend a ``original_slice``-wide strip of the (resized) original image to
    the left of *tile* so the upscaler sees some surrounding context."""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    """Crop away the context strip added by ``squeeze_tile`` (x4 because the tile
    has been upscaled 4x in the meantime)."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    """Return the largest multiple of *d* that is <= *n*."""
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Tile-based 4x upscaler: splits the input image into overlapping tiles, runs
    the parent ``StableDiffusionUpscalePipeline`` on each tile, and blends the
    results back together with linear-ramp transparency masks."""

    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        low_res_scheduler,
        scheduler,
        max_noise_level=350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale the (x, y) tile of *image* and paste the blended result into *final_image*."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position of the context strip, taken around the tile center.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        """Upscale *image* 4x tile by tile. *callback*, if given, receives a dict
        with keys ``progress`` (0..1) and ``image`` (the partial result) per tile."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    """Demo: upscale the diffusers library logo with the x4 upscaler checkpoint."""
    sd_model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(sd_model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"""progress: {obj['progress']:.4f}""")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(
        image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback
    )
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
283
0
def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int = 0 ) -> list: '''simple docstring''' A__ = length or len(SCREAMING_SNAKE_CASE__ ) A__ = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: A__ , A__ = list_data[i + 1], list_data[i] A__ = True return list_data if not swapped else bubble_sort(SCREAMING_SNAKE_CASE__ , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
7
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    """Configuration for a MobileNetV2 model.

    Stores architecture hyper-parameters (channel width multiplier, input
    resolution, activation, etc.) used to instantiate the model.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # A non-positive width multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV2."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX output against PyTorch output.
        return 1e-4
283
0
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """In-memory mirror of a DeepSpeed config.

    Accepts a dict, a path to a JSON file, or a base64-encoded JSON string, and
    answers queries about the ZeRO stage and offload settings.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        """Cache the ZeRO stage and whether any offload device is configured."""
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        """Resolve a dotted key to (containing sub-dict, final key); (None, key) if missing."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """Return the value at the dotted key, or *default* when absent."""
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Delete the sub-tree at the dotted key; raise only when *must_exist* is True."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        """True iff the key exists and its value is truthy (missing key => False)."""
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        """True iff the key exists and its value is falsy (missing key => False)."""
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """Thin wrapper exposing Accelerate's ``backward`` API on a DeepSpeed engine."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Optimizer wrapper that defers all real work to the DeepSpeed engine."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether the last optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Scheduler wrapper that defers stepping to the DeepSpeed engine."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer used when the real optimizer comes from the DeepSpeed config."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler used when the real scheduler comes from the DeepSpeed config."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
8
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) _snake_case = 2_99_79_24_58 # Symbols _snake_case , _snake_case , _snake_case , _snake_case = symbols('''ct x y z''') def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' if velocity > c: raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError("Speed must be greater than or equal to 1!" ) return velocity / c def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return 1 / sqrt(1 - beta(SCREAMING_SNAKE_CASE_ ) ** 2 ) def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return np.array( [ [gamma(SCREAMING_SNAKE_CASE_ ), -gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), 0, 0], [-gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), gamma(SCREAMING_SNAKE_CASE_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if event is None: lowerCamelCase : Tuple = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(SCREAMING_SNAKE_CASE_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: _snake_case = transform(29_97_92_45) print('''Example of four vector: ''') print(f'''ct\' = {four_vector[0]}''') print(f'''x\' = {four_vector[1]}''') print(f'''y\' = {four_vector[2]}''') print(f'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values _snake_case = {ct: c, x: 1, y: 1, z: 1} _snake_case = [four_vector[i].subs(sub_dict) for i in range(4)] print(f'''\n{numerical_vector}''')
283
0
def _UpperCamelCase ( lowercase__ ): if num <= 0: raise ValueError('''Input must be a positive integer''' ) __SCREAMING_SNAKE_CASE : Tuple = [True] * (num + 1) __SCREAMING_SNAKE_CASE : Dict = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , lowercase__ ): __SCREAMING_SNAKE_CASE : str = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[str] =int(input('Enter a positive integer: ').strip()) print(prime_sieve_eratosthenes(user_num))
9
import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor _snake_case = logging.get_logger(__name__) class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' def __init__( self , *__A , **__A ): """simple docstring""" warnings.warn( "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DPTImageProcessor instead." , __A , ) super().__init__(*__A , **__A )
283
0
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq state-dict key (or key fragment) -> HF module path; "*" is a layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}

# Keys that live at the top level of the HF model (no "unispeech_sat." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the HF module addressed by dotted *key* / *weight_type*.

    Raises ValueError when the checkpoint tensor shape does not match the target.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")


def recursively_load_weights(fairseq_model, hf_model):
    """Map every fairseq checkpoint tensor into *hf_model*; log whatever is left over."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-extractor conv/layer-norm tensor into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq UniSpeechSat checkpoint into the transformers design
    and save it to *pytorch_dump_folder_path*.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): dict_path is deliberately blanked before use — confirm upstream intent.
    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        not args.not_finetuned,
    )
10
import argparse _snake_case = '''docs/source/_static/js/custom.js''' def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' with open(SCREAMING_SNAKE_CASE_ , encoding="utf-8" , newline="\n" ) as f: lowerCamelCase : List[str] = f.readlines() lowerCamelCase : int = 0 # First let's put the right version while not lines[index].startswith("const stableVersion =" ): index += 1 lowerCamelCase : str = f"""const stableVersion = \"v{version}\"\n""" # Then update the dictionary while not lines[index].startswith("const versionMapping = {" ): index += 1 # We go until the end while not lines[index].startswith("}" ): index += 1 # We add the new version at the end lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n""" with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') _snake_case = parser.parse_args() update_custom_js(args.version)
283
0
from __future__ import annotations def _UpperCAmelCase (UpperCamelCase__ : list ): if not nums: raise ValueError("List is empty" ) return sum(UpperCamelCase__ ) / len(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
11
from ..utils import DummyObject, requires_backends


# NOTE(review): the original class names were mangled upstream — every class here
# is called `UpperCAmelCase_` and shadows the previous one. Fixes applied:
#   * metaclass was the undefined name `UpperCamelCase`; only `DummyObject` is
#     imported, which is the standard metaclass for backend dummies.
#   * the backend list was annotated `__A : Any` with un-imported typing names
#     (class-body annotations are evaluated → NameError); restored to the
#     un-annotated `_backends` attribute that `DummyObject` reads.
#   * the two classmethods were both named `_snake_case` (the second shadowed
#     the first); restored to `from_config`/`from_pretrained` per the dummy
#     object convention — TODO confirm against the original file.


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when Flax is missing."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
283
0
# Multipliers to convert a speed INTO km/h.
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

# Multipliers to convert a speed FROM km/h into the target unit.
speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def lowerCamelCase__(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert a speed between km/h, m/s, mph and knot, rounded to 3 decimals.

    >>> lowerCamelCase__(100, "km/h", "m/s")
    27.778
    >>> lowerCamelCase__(100, "mph", "km/h")
    160.934

    Raises:
        ValueError: If either unit is not supported.
    """
    # Bug fixes: the original signature declared three parameters all named
    # `A__` (a SyntaxError) and the module bound both charts to the same name;
    # the parameter and chart names are recovered from the function body.
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
12
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class UpperCAmelCase_(PipelineTool):
    """Tool that transcribes an audio input to text with a Whisper checkpoint.

    NOTE(review): the base class was the undefined name `UpperCamelCase`
    (restored to the imported ``PipelineTool``), the class attributes had
    mangled names, and all three methods were named ``_snake_case`` so the
    later ones shadowed the earlier ones. Names are restored to the
    PipelineTool contract (``encode``/``forward``/``decode``) — confirm
    against the original tool definition.
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        # Convert the raw audio into Whisper log-mel input features.
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        # Bug fix: the original passed the outputs object itself as
        # `skip_special_tokens`; a boolean flag is intended.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
283
0
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class __lowercase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for RoFormer slow and fast tokenizers.

    NOTE(review): the mixin base was the undefined name `UpperCAmelCase_`
    (restored to the imported ``TokenizerTesterMixin``), and every method was
    named ``_SCREAMING_SNAKE_CASE`` (each shadowing the previous); names are
    restored to the mixin's conventions — confirm against the original file.
    """

    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_training_new_tokenizer(self):
        # Not applicable to this tokenizer (custom Chinese pre-tokenization).
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
13
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map an original YOSO checkpoint key to its HF Transformers name.

    NOTE(review): all three functions in this file were named ``lowercase_``
    (shadowing each other); this helper's name is reconstructed — the other
    two names are grounded in the in-file call sites.
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename every key in the state dict and add the derived entries.

    NOTE(review): the obfuscated source lost the assignment targets in this
    loop; they are reconstructed from the pop/rename/continue pattern —
    confirm against the original conversion script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            # Heads not used by YosoForMaskedLM are dropped.
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # Position ids start at 2 (padding offset), shaped (1, max_position_embeddings).
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load an original YOSO checkpoint and save it in HF Transformers format."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
283
0
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


# NOTE(review): every function in this file was defined as `SCREAMING_SNAKE_CASE`
# (each shadowing the previous) while the bodies call `check_model_parameters`,
# `step_model`, `get_training_setup` and the `test_*` helpers by name. The names
# below are recovered from those in-file call sites.


def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that the two models' gradients are (or are not) in sync."""
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward step, via `accelerator.backward` by default."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Return everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        # Bug fix: the lambda parameter was mangled while the body kept `epoch`.
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    """On a single device, `no_sync` is a noop: grads are always in sync."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    """In a distributed setup, `no_sync` must actually skip grad syncing."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` should only sync grads every 2nd step."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Gradient accumulation with optimizer and LR scheduler stays aligned."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()

        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    """`gradient_state.active_dataloader` tracks nested dataloader iteration."""
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # Entry point for spawned workers; presumably for xla_spawn — TODO confirm.
    main()


if __name__ == "__main__":
    main()
14
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual-CLIP text encoder.

    NOTE(review): this class was renamed `UpperCAmelCase_` and its base was the
    undefined `UpperCamelCase`; the name is recovered from the
    `config_class = MCLIPConfig` reference below and the base from the imports.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # Attribute targets recovered from the reads in the model below
        # (`config.transformerDimensions`, `config.numDims`).
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class UpperCAmelCase_(PreTrainedModel):
    """XLM-R text encoder with a linear projection into the CLIP image space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool token embeddings over the attention mask, then project.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
283
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Bug fixes: the structure dict and the per-backend entries were bound to
# throwaway names, leaving `_import_structure` (referenced below) undefined,
# and the final `_LazyModule` was never installed in `sys.modules`.
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
15
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    """Tests for `OwlViTProcessor` (CLIP tokenizer + OwlViT image processor)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image of shape (30, 400, 3)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
283
0
"""simple docstring""" from statistics import mean import numpy as np def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> list: lowercase__ : Union[str, Any] = 0 # Number of processes finished lowercase__ : Optional[Any] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. lowercase__ : Union[str, Any] = [0] * no_of_process # List to include calculation results lowercase__ : List[Any] = [0] * no_of_process # Sort by arrival time. lowercase__ : int = [burst_time[i] for i in np.argsort(__lowerCamelCase )] lowercase__ : str = [process_name[i] for i in np.argsort(__lowerCamelCase )] arrival_time.sort() while no_of_process > finished_process_count: lowercase__ : List[Any] = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: lowercase__ : str = arrival_time[i] lowercase__ : Optional[Any] = 0 # Index showing the location of the process being performed lowercase__ : Optional[int] = 0 # Saves the current response ratio. lowercase__ : str = 0 for i in range(0 , __lowerCamelCase ): if finished_process[i] == 0 and arrival_time[i] <= current_time: lowercase__ : Tuple = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: lowercase__ : int = temp lowercase__ : Optional[Any] = i # Calculate the turn around time lowercase__ : List[Any] = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. 
lowercase__ : List[Any] = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> list: lowercase__ : str = [0] * no_of_process for i in range(0 , __lowerCamelCase ): lowercase__ : str = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": lowerCAmelCase_ = 5 lowerCAmelCase_ = ['A', 'B', 'C', 'D', 'E'] lowerCAmelCase_ = [1, 2, 3, 4, 5] lowerCAmelCase_ = [1, 2, 3, 4, 5] lowerCAmelCase_ = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) lowerCAmelCase_ = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time') for i in range(0, no_of_process): print( F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t''' F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}''' ) print(F'''average waiting time : {mean(waiting_time):.5f}''') print(F'''average turn around time : {mean(turn_around_time):.5f}''')
16
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for `BioGptTokenizer` (BPE, no fast tokenizer)."""

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Check BPE tokenization and token-to-id conversion on the toy vocab."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # BioGPT prepends </s> (id 2) to every sequence.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
283
0
"""simple docstring""" def _A ( UpperCamelCase_ : int, UpperCamelCase_ : int) -> int: '''simple docstring''' return int((input_a, input_a).count(0) == 0) def _A ( ) -> None: '''simple docstring''' assert and_gate(0, 0) == 0 assert and_gate(0, 1) == 0 assert and_gate(1, 0) == 0 assert and_gate(1, 1) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
17
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit (1, 11, 111, ...) divisible by
    `divisor`, or 0 when no repunit is divisible (i.e. when `divisor` shares a
    factor with 10).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    # Grow the repunit one digit at a time, tracking only its remainder;
    # the loop ends when the remainder hits 0.
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Project Euler 129: return the least n coprime to 10 for which the
    smallest repunit divisible by n has more than `limit` digits.

    Since A(n) < n, the answer must exceed `limit`, so the search starts
    just below it and steps through odd candidates only.
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
283
0
import requests

giphy_api_key = "YOUR API KEY"
# Can be fetched from https://developers.giphy.com/dashboard/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs matching `query` from the Giphy search API."""
    # Giphy expects '+'-separated search terms.
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
18
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_key, new_key) pairs mapping original DiT checkpoint names to HF BEiT names."""
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused qkv projection of the original checkpoint into separate q/k/v tensors."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a DiT checkpoint's weights into the HF BEiT structure,
    verify the logits shape on a test image, and save (optionally push) the result.
    """
    # rvlcdip checkpoints are classification fine-tunes; all others keep the MIM head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
283
0
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for `PNDMPipeline` using a tiny randomly-initialized UNet."""

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the pretrained ddpm-cifar10-32 UNet."""

    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
19
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    r"""
    Constructs a BridgeTower processor, wrapping a Roberta tokenizer and a
    BridgeTower image processor into a single processor that offers both
    tokenization and image preparation.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and preprocess `images`, returning a single `BatchEncoding`."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to `RobertaTokenizerFast.batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to `RobertaTokenizerFast.decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
283
0
def simplify(current_set: list[list]) -> list[list]:
    """One step of Gaussian-style elimination, applied recursively.

    Normalizes each row by its leading coefficient, subtracts to cancel the
    first column, then recurses on the remaining sub-system.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations, each given as n coefficients plus
    the constant term, returning the variable values in order.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Elimination needs a non-zero pivot: move one equation with no zero
    # coefficients to the front if any row contains a zero.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the last (fully reduced) row upward.
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
20
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with bidirectional bubble passes and return it.

    Each outer iteration makes a backward pass (bubbling the minimum left)
    followed by a forward pass (bubbling the maximum right); stops early when
    a full iteration performs no swap.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([])
    []
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: move the smallest remaining element to the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Forward pass: move the largest remaining element to the back.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
283
0
import mpmath # for roots of unity import numpy as np class _lowerCamelCase: def __init__( self, lowerCamelCase=None, lowerCamelCase=None) -> Optional[int]: """simple docstring""" _lowercase : Optional[int] = list(poly_a or [0])[:] _lowercase : int = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowercase : Optional[Any] = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowercase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowercase : Optional[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowercase : str = complex(mpmath.root(x=1, n=self.c_max_length, k=1)) # The product _lowercase : Optional[Any] = self.__multiply() def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]: """simple docstring""" _lowercase : List[str] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB] # Corner case if len(lowerCamelCase) <= 1: return dft[0] # _lowercase : Optional[Any] = self.c_max_length // 2 while next_ncol > 0: _lowercase : Any = [[] for i in range(lowerCamelCase)] _lowercase : List[str] = self.root**next_ncol # First half of next step _lowercase : str = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(lowerCamelCase): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowercase : Any = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(lowerCamelCase): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowercase : str = new_dft _lowercase : Union[str, Any] = next_ncol // 2 return dft[0] def UpperCamelCase ( self) -> str: """simple docstring""" _lowercase : List[str] = self.__dft('A') 
_lowercase : str = self.__dft('B') _lowercase : int = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowercase : int = 2 while next_ncol <= self.c_max_length: _lowercase : Optional[int] = [[] for i in range(lowerCamelCase)] _lowercase : Union[str, Any] = self.root ** (next_ncol // 2) _lowercase : int = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowercase : Optional[Any] = new_inverse_c next_ncol *= 2 # Unpack _lowercase : Any = [round(x[0].real, 8) + round(x[0].imag, 8) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Optional[Any]: """simple docstring""" _lowercase : List[str] = 'A = ' + ' + '.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A])) _lowercase : int = 'B = ' + ' + '.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B])) _lowercase : Optional[int] = 'A*B = ' + ' + '.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.product)) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
21
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (CPU-sized) tests for the text-to-image StableUnCLIP pipeline."""

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build a tiny, deterministic set of pipeline components."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embeddings are the image embeddings concatenated with
            # the noise-level embedding, hence the factor of 2.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline under test."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Exact-value comparison is only stable on CPU.
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        # Exact-value comparison is only stable on CPU/MPS.
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """GPU integration tests against the published stable-unclip checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
283
0
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __SCREAMING_SNAKE_CASE :Optional[int] = TypeVar('''T''') class A_ ( Generic[T] ): def __init__( self : List[Any] , snake_case_ : list[T] , snake_case_ : Callable[[T, T], T] ): _UpperCAmelCase = None _UpperCAmelCase = len(snake_case_ ) _UpperCAmelCase = [any_type for _ in range(self.N )] + arr _UpperCAmelCase = fnc self.build() def lowercase ( self : List[Any] ): for p in range(self.N - 1 , 0 , -1 ): _UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : T ): p += self.N _UpperCAmelCase = v while p > 1: _UpperCAmelCase = p // 2 _UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowercase ( self : Any , snake_case_ : int , snake_case_ : int ): # noqa: E741 _UpperCAmelCase , _UpperCAmelCase = l + self.N, r + self.N _UpperCAmelCase = None while l <= r: if l % 2 == 1: _UpperCAmelCase = self.st[l] if res is None else self.fn(snake_case_ , self.st[l] ) if r % 2 == 0: _UpperCAmelCase = self.st[r] if res is None else self.fn(snake_case_ , self.st[r] ) _UpperCAmelCase , _UpperCAmelCase = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] __SCREAMING_SNAKE_CASE :List[str] = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, min) __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, max) __SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, lambda a, b: a + b) def UpperCAmelCase_ ( ) -> None: '''simple docstring''' for i in range(len(__lowercase ) ): for j in range(__lowercase , len(__lowercase ) ): _UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] ) _UpperCAmelCase = reduce(__lowercase , test_array[i : j + 1] ) 
_UpperCAmelCase = reduce(lambda __lowercase , __lowercase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(__lowercase , __lowercase ) assert max_range == max_segment_tree.query(__lowercase , __lowercase ) assert sum_range == sum_segment_tree.query(__lowercase , __lowercase ) test_all_segments() for index, value in test_updates.items(): __SCREAMING_SNAKE_CASE :str = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
22
# Lazy-import module init for SqueezeBERT: the import structure maps submodule
# names to their public symbols; optional backends are added only if available.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
283
0
"""Project Euler problem 145: count reversible numbers below 10**max_power.

A number n is reversible when every digit of n + reverse(n) is odd.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count reversible numbers of the given total ``length`` by filling digit
    pairs from the outside in, tracking the carry ``remainder``.

    >>> reversible_numbers(1, 0, [0], 1)
    0
    >>> reversible_numbers(2, 0, [0] * 2, 2)
    20
    """
    if remaining_length == 0:
        # Leading/trailing zero would make n or reverse(n) invalid.
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        # Verify every digit of n + reverse(n) is odd, propagating carries.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        # Middle digit of an odd-length number: its doubled sum must stay odd.
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        # The partner digit must have the opposite parity of the running sum.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the count of reversible numbers below 10**max_power.

    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
23
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    """Configuration for a Decision Transformer model (GPT-2 style backbone
    over state/action/return tokens). All arguments are stored verbatim as
    attributes; see the assignments below for the full list.
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map common config names onto the GPT-2-style attribute names used here.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        # Environment dimensions.
        self.state_dim = state_dim
        self.act_dim = act_dim
        # Transformer backbone hyperparameters.
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
283
0
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    """A fixed SageMaker launch config plus sample training-script args."""

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # Well-formed nargs: every flag has an explicit (or boolean-implicit) value.
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # Malformed nargs: mixes bare flags and valued flags ambiguously.
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # Values should be parsed into their natural Python types.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        # Ambiguous flag/value sequences must be rejected.
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
24
def solution(n: int = 4000000) -> int:
    """Project Euler problem 2: sum of the even-valued Fibonacci terms
    that do not exceed ``n``.

    >>> solution(10)
    10
    >>> solution(34)
    44
    """
    total = 0
    a, b = 1, 2
    # Walk the Fibonacci sequence, accumulating the even terms up to n.
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
283
0
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ (a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[str] = CodeGenTokenizer __UpperCamelCase : int = CodeGenTokenizerFast __UpperCamelCase : List[str] = True __UpperCamelCase : Optional[Any] = {'''add_prefix_space''': True} __UpperCamelCase : str = False def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ : List[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] SCREAMING_SNAKE_CASE__ : Any = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) SCREAMING_SNAKE_CASE__ : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] SCREAMING_SNAKE_CASE__ : List[str] = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" 
kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = """lower newer""" SCREAMING_SNAKE_CASE__ : Optional[Any] = """lower newer""" return input_text, output_text def __magic_name__ (self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ : int = """lower newer""" SCREAMING_SNAKE_CASE__ : Dict = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : str = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = """lower newer""" # Testing tokenization SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens 
SCREAMING_SNAKE_CASE__ : int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing the unknown token SCREAMING_SNAKE_CASE__ : str = tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" pass def __magic_name__ (self , SCREAMING_SNAKE_CASE__=15 ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # Simple input SCREAMING_SNAKE_CASE__ : Any = """This is a simple input""" SCREAMING_SNAKE_CASE__ : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE__ : Dict = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE__ : Optional[int] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests 
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , ) def __magic_name__ (self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input SCREAMING_SNAKE_CASE__ : str = """This is a simple input""" SCREAMING_SNAKE_CASE__ : int = ["""This is a simple input looooooooong""", """This is a simple input"""] SCREAMING_SNAKE_CASE__ : str = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE__ : List[Any] = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] SCREAMING_SNAKE_CASE__ : Dict = tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , 
return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer(*SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = """$$$""" SCREAMING_SNAKE_CASE__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = """This is a simple input""" SCREAMING_SNAKE_CASE__ : List[str] = ["""This is a simple input 1""", """This is a simple input 2"""] 
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.bos_token_id SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE__ : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) SCREAMING_SNAKE_CASE__ : str = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" SCREAMING_SNAKE_CASE__ : int = """\nif len_a > len_b: result = a\nelse: result = b""" SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""] SCREAMING_SNAKE_CASE__ : Any = tokenizer.decode(SCREAMING_SNAKE_CASE__ , truncate_before_pattern=SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Dict: """simple docstring""" pass
25
"""Lazy-loading package init for the VisionEncoderDecoder models.

Fixes two defects in the obfuscated residue: the lazy-import map was assigned
to a throwaway name (`_snake_case`) while `_LazyModule` received the undefined
name `_import_structure` (NameError at import), and the constructed
`_LazyModule` was never installed into `sys.modules`, so lazy loading never
took effect.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import map: submodule name -> public names it exposes.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

# Each backend's modeling module is only registered when that backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static analyzers see the real imports; at runtime this branch is skipped.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
283
0
"""VisualBERT model configuration.

Fixes in the obfuscated residue: `__init__` declared the same parameter name
(`_a`) eighteen times — a SyntaxError — while the body read names
(`vocab_size`, ...) that were never bound; the base class `UpperCamelCase__`
and the `model_type` class attribute were also mangled. Parameter names and
defaults are restored from the upstream VisualBERT configuration.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoint -> config-file URL map for the released VisualBERT models.
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    """Configuration class for VisualBERT models.

    Stores the hyper-parameters of a VisualBERT encoder; instantiating with the
    defaults yields a configuration similar to ``uclanlp/visualbert-vqa-coco-pre``.
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Special token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        # Dimensionality of the incoming visual embeddings (projected to hidden_size).
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # If True, visual embeddings bypass the transformer and are concatenated at output.
        self.bypass_transformer = bypass_transformer
        # If True, visual token-type embeddings are initialized from the text ones.
        self.special_visual_initialize = special_visual_initialize
26
"""Stable Diffusion inference script optimized with Intel Extension for PyTorch.

Fixes in the obfuscated residue: every assignment target had been collapsed to
`_snake_case` while use-sites referenced the real names (`args`, `pipe`,
`generator`, ...) — NameError at runtime — and `torch.bfloataa` is not a real
dtype (restored to `torch.bfloat16`).
"""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last — preferred memory layout for ipex CPU kernels
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex; sample_input lets ipex trace the unet with realistic shapes
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # older ipex versions do not accept sample_input — fall back without it
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute — fixed seed for reproducibility
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
283
0
"""Lazy-loading package init for the Data2Vec (audio/text/vision) models.

Fixes in the obfuscated residue: the lazy-import map and the per-backend name
lists were assigned to throwaway names (`__lowercase`) while `_LazyModule`
received the undefined `_import_structure` (NameError at import); module and
class names had been mangled (`dataavec` / `DataaVec...`); and the constructed
`_LazyModule` was never installed into `sys.modules`.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import map: submodule name -> public names it exposes.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analyzers see the real imports; at runtime this branch is skipped.
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
27
# NOTE(review): TF Flaubert model tester + test suite. Identifiers in this file
# have been collapsed by a source transform: every local is `lowerCamelCase`,
# every parameter is `__A`, every method is `_snake_case`, and both test
# classes share the name `UpperCAmelCase_`. Consecutive assignments therefore
# rebind a single name, the FlaubertConfig call below reads attributes
# (self.batch_size, self.use_input_lengths, ...) that are never distinctly
# stored, and the mixin bases `UpperCamelCase` are unbound — presumably
# TFModelTesterMixin / PipelineTesterMixin upstream. Tokens are kept
# byte-identical here (only reformatting and comments added); restore names
# from the upstream transformers Flaubert test file.
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        FlaubertConfig,
        TFFlaubertForMultipleChoice,
        TFFlaubertForQuestionAnsweringSimple,
        TFFlaubertForSequenceClassification,
        TFFlaubertForTokenClassification,
        TFFlaubertModel,
        TFFlaubertWithLMHeadModel,
    )


# Model tester: builds small Flaubert configs/inputs and checks output shapes.
class UpperCAmelCase_ :
    '''simple docstring'''

    def __init__( self , __A , ):
        """simple docstring"""
        # NOTE(review): all of the hyper-parameters below should land in distinct
        # attributes (batch_size=13, seq_length=7, vocab_size=99, ...).
        lowerCamelCase : str = parent
        lowerCamelCase : Union[str, Any] = 13
        lowerCamelCase : Optional[Any] = 7
        lowerCamelCase : List[str] = True
        lowerCamelCase : Optional[int] = True
        lowerCamelCase : Union[str, Any] = True
        lowerCamelCase : List[Any] = True
        lowerCamelCase : Tuple = True
        lowerCamelCase : Any = False
        lowerCamelCase : int = False
        lowerCamelCase : Tuple = False
        lowerCamelCase : Union[str, Any] = 2
        lowerCamelCase : Dict = 99
        lowerCamelCase : Tuple = 0
        lowerCamelCase : Any = 32
        lowerCamelCase : List[Any] = 2
        lowerCamelCase : Tuple = 4
        lowerCamelCase : List[str] = 0.1
        lowerCamelCase : int = 0.1
        lowerCamelCase : int = 512
        lowerCamelCase : List[Any] = 16
        lowerCamelCase : Any = 2
        lowerCamelCase : Any = 0.02
        lowerCamelCase : List[str] = 3
        lowerCamelCase : Tuple = 4
        lowerCamelCase : int = "last"
        lowerCamelCase : int = True
        lowerCamelCase : Dict = None
        lowerCamelCase : Tuple = 0

    def _snake_case ( self ):
        """simple docstring"""
        # Builds (config, inputs...) tuple used by all create_and_check_* methods.
        lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
        lowerCamelCase : Tuple = None
        if self.use_input_lengths:
            lowerCamelCase : Optional[Any] = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        lowerCamelCase : str = None
        if self.use_token_type_ids:
            lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        lowerCamelCase : Dict = None
        lowerCamelCase : Dict = None
        lowerCamelCase : Tuple = None
        if self.use_labels:
            lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
            lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase : List[Any] = FlaubertConfig(
            vocab_size=self.vocab_size ,
            n_special=self.n_special ,
            emb_dim=self.hidden_size ,
            n_layers=self.num_hidden_layers ,
            n_heads=self.num_attention_heads ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            gelu_activation=self.gelu_activation ,
            sinusoidal_embeddings=self.sinusoidal_embeddings ,
            asm=self.asm ,
            causal=self.causal ,
            n_langs=self.n_langs ,
            max_position_embeddings=self.max_position_embeddings ,
            initializer_range=self.initializer_range ,
            summary_type=self.summary_type ,
            use_proj=self.use_proj ,
            bos_token_id=self.bos_token_id ,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
        """simple docstring"""
        # Base model: checks last_hidden_state shape for dict and list inputs.
        lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A )
        lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        lowerCamelCase : Dict = model(__A )
        lowerCamelCase : Any = [input_ids, input_mask]
        lowerCamelCase : Tuple = model(__A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
        """simple docstring"""
        # LM head: logits over the vocabulary per position.
        lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A )
        lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        lowerCamelCase : int = model(__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
        """simple docstring"""
        # QA head: start/end logits per position.
        lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A )
        lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths}
        lowerCamelCase : Union[str, Any] = model(__A )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
        """simple docstring"""
        # Sequence classification head.
        lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A )
        lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths}
        lowerCamelCase : Union[str, Any] = model(__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
        """simple docstring"""
        # Token classification head.
        lowerCamelCase : Tuple = self.num_labels
        lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A )
        lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        lowerCamelCase : Union[str, Any] = model(__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
        """simple docstring"""
        # Multiple choice head: inputs are tiled across the choice dimension.
        lowerCamelCase : Any = self.num_choices
        lowerCamelCase : Optional[Any] = TFFlaubertForMultipleChoice(config=__A )
        lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase : Optional[int] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        lowerCamelCase : Union[str, Any] = model(__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _snake_case ( self ):
        """simple docstring"""
        # Adapts prepare_config_and_inputs() output to the common-test dict format.
        lowerCamelCase : Dict = self.prepare_config_and_inputs()
        (
            ( lowerCamelCase ) ,
            ( lowerCamelCase ) ,
            ( lowerCamelCase ) ,
            ( lowerCamelCase ) ,
            ( lowerCamelCase ) ,
            ( lowerCamelCase ) ,
            ( lowerCamelCase ) ,
            ( lowerCamelCase ) ,
            ( lowerCamelCase ) ,
        ) : Optional[Any] = config_and_inputs
        lowerCamelCase : List[Any] = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict


@require_tf
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): bases `UpperCamelCase` are unbound — presumably
    # TFModelTesterMixin and PipelineTesterMixin; all class attrs below should
    # be all_model_classes / all_generative_model_classes / pipeline_model_mapping /
    # test_head_masking / test_onnx respectively.
    __A : str = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    __A : Dict = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    __A : Any = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __A : List[str] = False
    __A : List[str] = False

    def _snake_case ( self , __A , __A , __A , __A , __A ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _snake_case ( self ):
        """simple docstring"""
        # setUp: wire up the model tester and the config tester.
        lowerCamelCase : Tuple = TFFlaubertModelTester(self )
        lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 )

    def _snake_case ( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def _snake_case ( self ):
        """simple docstring"""
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*__A )

    def _snake_case ( self ):
        """simple docstring"""
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*__A )

    def _snake_case ( self ):
        """simple docstring"""
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*__A )

    def _snake_case ( self ):
        """simple docstring"""
        lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*__A )

    def _snake_case ( self ):
        """simple docstring"""
        lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*__A )

    def _snake_case ( self ):
        """simple docstring"""
        lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A )

    @slow
    def _snake_case ( self ):
        """simple docstring"""
        # Smoke-test loading the first released checkpoint.
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A )
            self.assertIsNotNone(__A )


@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def _snake_case ( self ):
        """simple docstring"""
        # Integration test: compares a hidden-state slice against golden values.
        lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        lowerCamelCase : str = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] ,
            dtype=tf.intaa ,
        )  # "J'aime flaubert !"
        lowerCamelCase : Dict = model(__A )[0]
        lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , __A )
        # compare the actual values for a slice.
        lowerCamelCase : Tuple = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] ,
            dtype=tf.floataa ,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
283
0
"""Processor class for ViLT.

Fixes in the obfuscated residue: the three ProcessorMixin hook attributes were
all named `_SCREAMING_SNAKE_CASE` (clobbering each other), four methods were
all named `A` (only the last survived), `self.current_processor` was never
stored (assignments targeted a local `UpperCamelCase`), and the base class
`_a` was undefined. Restored from the upstream ViLT processor.
"""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    """Wraps a ViLT image processor and a BERT tokenizer into a single processor."""

    # Hooks read by ProcessorMixin to build/save the sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Back-compat: accept the deprecated `feature_extractor` kwarg as an alias.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and preprocess `images`, merging both into one BatchEncoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of the two sub-processors' input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
28
# NOTE(review): tiled Stable Diffusion x4 upscaler (community pipeline).
# Identifiers have been collapsed by a source transform: all seven module
# functions share the name `lowercase_` (each def clobbers the previous — only
# the last survives), all locals are `lowerCamelCase`, all params are
# `SCREAMING_SNAKE_CASE_`/`__A`, the pipeline class is `UpperCAmelCase_` with
# unbound base `UpperCamelCase` (presumably StableDiffusionUpscalePipeline),
# and call-sites reference the original names (make_transparency_mask, clamp,
# clamp_rect, add_overlap_rect, squeeze_tile, unsqueeze_tile, size_x, rect,
# mask, pipe, final_image, main, ...), which are unbound as written. Also
# `np.uinta`, `torch.floataa` and `StableDiffusionTiledUpscalePipeline` look
# mangled — restore from the upstream community pipeline. Tokens kept
# byte-identical; only reformatting and comments added.
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


# Presumably make_transparency_mask: linear-ramp alpha mask with selected borders removed.
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[] ):
    '''simple docstring'''
    lowerCamelCase : Optional[Any] = size[0] - overlap_pixels * 2
    lowerCamelCase : int = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    lowerCamelCase : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
    lowerCamelCase : List[Any] = np.pad(SCREAMING_SNAKE_CASE_ , mode="linear_ramp" , pad_width=SCREAMING_SNAKE_CASE_ , end_values=0 )
    if "l" in remove_borders:
        lowerCamelCase : Optional[Any] = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        lowerCamelCase : List[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        lowerCamelCase : List[Any] = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        lowerCamelCase : Tuple = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


# Presumably clamp(n, smallest, largest).
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''
    return max(SCREAMING_SNAKE_CASE_ , min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )


# Presumably clamp_rect(rect, min, max): clamps all four rect coordinates.
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''
    return (
        clamp(rect[0] , min[0] , max[0] ),
        clamp(rect[1] , min[1] , max[1] ),
        clamp(rect[2] , min[0] , max[0] ),
        clamp(rect[3] , min[1] , max[1] ),
    )


# Presumably add_overlap_rect(rect, overlap, image_size): grows the rect, clamped to the image.
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''
    lowerCamelCase : Optional[Any] = list(SCREAMING_SNAKE_CASE_ )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    lowerCamelCase : Any = clamp_rect(SCREAMING_SNAKE_CASE_ , [0, 0] , [image_size[0], image_size[1]] )
    return rect


# Presumably squeeze_tile: prepends a slice of the original image to the tile.
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''
    lowerCamelCase : Dict = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) ,
        (0, 0) , )
    result.paste(SCREAMING_SNAKE_CASE_ , (original_slice, 0) )
    return result


# Presumably unsqueeze_tile: crops the prepended slice back off (x4-scaled).
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''
    lowerCamelCase : Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    lowerCamelCase : int = tile.crop(SCREAMING_SNAKE_CASE_ )
    return tile


# Presumably next_divisible(n, d): rounds n down to a multiple of d.
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''
    lowerCamelCase : int = n % d
    return n - divisor


class UpperCAmelCase_ ( UpperCamelCase ):
    '''simple docstring'''

    def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ):
        """simple docstring"""
        # Thin wrapper over StableDiffusionUpscalePipeline's constructor.
        super().__init__(
            vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , )

    def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , **__A ):
        """simple docstring"""
        # Presumably _process_tile: upscales one (x, y) tile with overlap and
        # pastes it into final_image using a linear-ramp transparency mask.
        torch.manual_seed(0 )
        lowerCamelCase : Tuple = (
            min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
            min(image.size[0] , (x + 1) * tile_size ),
            min(image.size[1] , (y + 1) * tile_size ),
        )
        lowerCamelCase : Union[str, Any] = add_overlap_rect(__A , __A , image.size )
        lowerCamelCase : List[str] = image.crop(__A )
        lowerCamelCase : Optional[int] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        lowerCamelCase : int = translated_slice_x - (original_image_slice / 2)
        lowerCamelCase : Optional[Any] = max(0 , __A )
        lowerCamelCase : Tuple = squeeze_tile(__A , __A , __A , __A )
        lowerCamelCase : Dict = to_input.size
        lowerCamelCase : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
        lowerCamelCase : Dict = super(__A , self ).__call__(image=__A , **__A ).images[0]
        lowerCamelCase : Tuple = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
        lowerCamelCase : Optional[Any] = unsqueeze_tile(__A , __A )
        lowerCamelCase : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
        lowerCamelCase : int = []
        if x == 0:
            remove_borders.append("l" )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r" )
        if y == 0:
            remove_borders.append("t" )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b" )
        lowerCamelCase : int = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) ,
            mode="L" , )
        final_image.paste(
            __A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A )

    @torch.no_grad()
    def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ):
        """simple docstring"""
        # Iterates the tile grid, upscaling each tile into the x4 final image.
        lowerCamelCase : Dict = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
        lowerCamelCase : Union[str, Any] = math.ceil(image.size[0] / tile_size )
        lowerCamelCase : Dict = math.ceil(image.size[1] / tile_size )
        lowerCamelCase : str = tcx * tcy
        lowerCamelCase : int = 0
        for y in range(__A ):
            for x in range(__A ):
                self._process_tile(
                    __A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image} )
        return final_image


# Presumably main(): demo driver (CUDA, fp16) with a progress callback.
def lowercase_( ):
    '''simple docstring'''
    lowerCamelCase : Dict = "stabilityai/stable-diffusion-x4-upscaler"
    lowerCamelCase : Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="fp16" , torch_dtype=torch.floataa )
    lowerCamelCase : Optional[Any] = pipe.to("cuda" )
    lowerCamelCase : List[str] = Image.open("../../docs/source/imgs/diffusers_library.jpg" )

    def callback(SCREAMING_SNAKE_CASE_ ):
        print(f"""progress: {obj['progress']:.4f}""" )
        obj["image"].save("diffusers_library_progress.jpg" )

    lowerCamelCase : int = pipe(image=SCREAMING_SNAKE_CASE_ , prompt="Black font, white background, vector" , noise_level=40 , callback=SCREAMING_SNAKE_CASE_ )
    final_image.save("diffusers_library.jpg" )


if __name__ == "__main__":
    main()
283
0
from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Any = DistilBertTokenizer _snake_case : int = DistilBertTokenizerFast _snake_case : Optional[int] = True @slow def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Any = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' ) UpperCAmelCase_ : str = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
29
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Dict = "mobilenet_v2" def __init__( self , __A=3 , __A=224 , __A=1.0 , __A=8 , __A=8 , __A=6 , __A=32 , __A=True , __A=True , __A="relu6" , __A=True , __A=0.8 , __A=0.02 , __A=0.001 , __A=255 , **__A , ): """simple docstring""" super().__init__(**__A ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." 
) lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Union[str, Any] = depth_multiplier lowerCamelCase : Tuple = depth_divisible_by lowerCamelCase : Dict = min_depth lowerCamelCase : Dict = expand_ratio lowerCamelCase : Optional[Any] = output_stride lowerCamelCase : int = first_layer_is_expansion lowerCamelCase : Union[str, Any] = finegrained_output lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : Optional[Any] = tf_padding lowerCamelCase : Optional[Any] = classifier_dropout_prob lowerCamelCase : Dict = initializer_range lowerCamelCase : str = layer_norm_eps lowerCamelCase : Optional[Any] = semantic_loss_ignore_index class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Union[str, Any] = version.parse("1.11" ) @property def _snake_case ( self ): """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _snake_case ( self ): """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _snake_case ( self ): """simple docstring""" return 1e-4
283
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], 
_import_structure, module_spec=__spec__)
30
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) _snake_case = 2_99_79_24_58 # Symbols _snake_case , _snake_case , _snake_case , _snake_case = symbols('''ct x y z''') def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' if velocity > c: raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError("Speed must be greater than or equal to 1!" ) return velocity / c def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return 1 / sqrt(1 - beta(SCREAMING_SNAKE_CASE_ ) ** 2 ) def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return np.array( [ [gamma(SCREAMING_SNAKE_CASE_ ), -gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), 0, 0], [-gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), gamma(SCREAMING_SNAKE_CASE_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): '''simple docstring''' if event is None: lowerCamelCase : Tuple = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(SCREAMING_SNAKE_CASE_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: _snake_case = transform(29_97_92_45) print('''Example of four vector: ''') print(f'''ct\' = {four_vector[0]}''') print(f'''x\' = {four_vector[1]}''') print(f'''y\' = {four_vector[2]}''') print(f'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values _snake_case = {ct: c, x: 1, y: 1, z: 1} _snake_case = [four_vector[i].subs(sub_dict) for i in range(4)] print(f'''\n{numerical_vector}''')
283
0
'''simple docstring''' __SCREAMING_SNAKE_CASE : Dict = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1 def UpperCamelCase_ ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float: """simple docstring""" if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def UpperCamelCase_ ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float: """simple docstring""" if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
31
import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor _snake_case = logging.get_logger(__name__) class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' def __init__( self , *__A , **__A ): """simple docstring""" warnings.warn( "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DPTImageProcessor instead." , __A , ) super().__init__(*__A , **__A )
283
0
from math import factorial, pi def SCREAMING_SNAKE_CASE_ ( __A : float , __A : int = 30 ) -> float: """simple docstring""" if not isinstance(__A , (int, float) ): raise ValueError('maclaurin_sin() requires either an int or float for theta' ) if not isinstance(__A , __A ) or accuracy <= 0: raise ValueError('maclaurin_sin() requires a positive int for accuracy' ) a_ : Tuple = float(__A ) a_ : int = theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__A ) ) def SCREAMING_SNAKE_CASE_ ( __A : float , __A : int = 30 ) -> float: """simple docstring""" if not isinstance(__A , (int, float) ): raise ValueError('maclaurin_cos() requires either an int or float for theta' ) if not isinstance(__A , __A ) or accuracy <= 0: raise ValueError('maclaurin_cos() requires a positive int for accuracy' ) a_ : Dict = float(__A ) a_ : Union[str, Any] = theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__A ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) print(maclaurin_sin(-10, 15)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(10, 15)) print(maclaurin_cos(-10, 15))
32
import argparse _snake_case = '''docs/source/_static/js/custom.js''' def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' with open(SCREAMING_SNAKE_CASE_ , encoding="utf-8" , newline="\n" ) as f: lowerCamelCase : List[str] = f.readlines() lowerCamelCase : int = 0 # First let's put the right version while not lines[index].startswith("const stableVersion =" ): index += 1 lowerCamelCase : str = f"""const stableVersion = \"v{version}\"\n""" # Then update the dictionary while not lines[index].startswith("const versionMapping = {" ): index += 1 # We go until the end while not lines[index].startswith("}" ): index += 1 # We add the new version at the end lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n""" with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') _snake_case = parser.parse_args() update_custom_js(args.version)
283
0
"""simple docstring""" import math class _UpperCAmelCase : def __init__( self : Union[str, Any] , A : Optional[int]=0 ) -> Union[str, Any]: # a graph with Node 0,1,...,N-1 lowercase_ : Any = n lowercase_ : Any = [ [math.inf for j in range(0 , A )] for i in range(0 , A ) ] # adjacency matrix for weight lowercase_ : List[Any] = [ [math.inf for j in range(0 , A )] for i in range(0 , A ) ] # dp[i][j] stores minimum distance from i to j def A ( self : List[str] , A : Dict , A : str , A : Optional[Any] ) -> Any: lowercase_ : Any = w def A ( self : Optional[Any] ) -> Tuple: for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase_ : str = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def A ( self : Any , A : Tuple , A : Dict ) -> str: return self.dp[u][v] if __name__ == "__main__": __A : str = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
33
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Any = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : str = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : List[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple 
docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Dict = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Dict = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Union[str, Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple 
docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Any = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : int = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] )
283
0
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _a ( __a ): __a : Dict = """""" __a : str = """hf-legacy""" # "hf://"" is reserved for hffs def __init__( self : str , lowercase : Optional[DatasetInfo] = None , lowercase : Optional[str] = None , **lowercase : Tuple , ): '''simple docstring''' super().__init__(self , **lowercase ) UpperCAmelCase = repo_info UpperCAmelCase = token UpperCAmelCase = None def A ( self : Optional[int] ): '''simple docstring''' if self.dir_cache is None: UpperCAmelCase = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes UpperCAmelCase = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(lowercase ): {'''name''': str(lowercase ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def A ( self : Tuple , lowercase : str , lowercase : str = "rb" , **lowercase : Dict , ): '''simple docstring''' if not isinstance(self.repo_info , lowercase ): raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" ) UpperCAmelCase = hf_hub_url(self.repo_info.id , lowercase , revision=self.repo_info.sha ) return fsspec.open( lowercase , mode=lowercase , headers=get_authentication_headers_for_url(lowercase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def A ( self : List[str] , lowercase : Optional[int] , **lowercase : List[Any] ): '''simple docstring''' self._get_dirs() UpperCAmelCase = self._strip_protocol(lowercase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(lowercase ) def A ( self : List[Any] , lowercase : Union[str, Any] , lowercase : Optional[Any]=False , 
**lowercase : Any ): '''simple docstring''' self._get_dirs() UpperCAmelCase = PurePosixPath(path.strip('''/''' ) ) UpperCAmelCase = {} for p, f in self.dir_cache.items(): UpperCAmelCase = PurePosixPath(p.strip('''/''' ) ) UpperCAmelCase = p.parent if root == path: UpperCAmelCase = f UpperCAmelCase = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
34
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Dict = "openai/whisper-base" __A : str = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __A : Any = "transcriber" __A : Any = WhisperProcessor __A : int = WhisperForConditionalGeneration __A : Any = ["audio"] __A : List[str] = ["text"] def _snake_case ( self , __A ): """simple docstring""" return self.pre_processor(__A , return_tensors="pt" ).input_features def _snake_case ( self , __A ): """simple docstring""" return self.model.generate(inputs=__A ) def _snake_case ( self , __A ): """simple docstring""" return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0]
283
0
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __a = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } __a = {"facebook/blenderbot_small-90M": 512} def __snake_case( _lowerCAmelCase ) -> Optional[Any]: snake_case__ : Any = set() snake_case__ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case__ : str = char snake_case__ : Dict = set(_lowerCAmelCase ) return pairs class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["input_ids", "attention_mask"] def __init__( self : str , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int="__start__" , snake_case_ : Optional[int]="__end__" , snake_case_ : str="__unk__" , snake_case_ : List[Any]="__null__" , **snake_case_ : int , ): super().__init__(unk_token=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , pad_token=snake_case_ , **snake_case_ ) with open(snake_case_ , encoding="""utf-8""" ) as vocab_handle: snake_case__ : Dict = json.load(snake_case_ ) snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case_ , encoding="""utf-8""" ) as merges_handle: snake_case__ : str = merges_handle.read().split("""\n""" )[1:-1] snake_case__ : Tuple = 
[tuple(merge.split() ) for merge in merges] snake_case__ : Any = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) snake_case__ : Dict = {} @property def lowerCamelCase ( self : str ): return len(self.encoder ) def lowerCamelCase ( self : List[Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase ( self : Dict , snake_case_ : str ): if token in self.cache: return self.cache[token] snake_case__ : Tuple = re.sub("""([.,!?()])""" , r""" \1""" , snake_case_ ) snake_case__ : Optional[int] = re.sub("""(')""" , r""" \1 """ , snake_case_ ) snake_case__ : int = re.sub(r"""\s{2,}""" , """ """ , snake_case_ ) if "\n" in token: snake_case__ : int = token.replace("""\n""" , """ __newln__""" ) snake_case__ : Dict = token.split(""" """ ) snake_case__ : Union[str, Any] = [] for token in tokens: if not len(snake_case_ ): continue snake_case__ : List[str] = token.lower() snake_case__ : Dict = tuple(snake_case_ ) snake_case__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) snake_case__ : int = get_pairs(snake_case_ ) if not pairs: words.append(snake_case_ ) continue while True: snake_case__ : Tuple = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break snake_case__ , snake_case__ : List[Any] = bigram snake_case__ : str = [] snake_case__ : Tuple = 0 while i < len(snake_case_ ): try: snake_case__ : Union[str, Any] = word.index(snake_case_ , snake_case_ ) new_word.extend(word[i:j] ) snake_case__ : List[Any] = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case__ : int = tuple(snake_case_ ) snake_case__ : Optional[Any] = new_word if len(snake_case_ ) == 1: break else: snake_case__ : str = get_pairs(snake_case_ ) snake_case__ : Optional[int] = """@@ """.join(snake_case_ ) snake_case__ : 
Any = word[:-4] snake_case__ : str = word words.append(snake_case_ ) return " ".join(snake_case_ ) def lowerCamelCase ( self : Tuple , snake_case_ : str ): snake_case__ : Tuple = [] snake_case__ : str = re.findall(r"""\S+\n?""" , snake_case_ ) for token in words: split_tokens.extend(list(self.bpe(snake_case_ ).split(""" """ ) ) ) return split_tokens def lowerCamelCase ( self : Optional[Any] , snake_case_ : str ): snake_case__ : Dict = token.lower() return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase ( self : int , snake_case_ : int ): return self.decoder.get(snake_case_ , self.unk_token ) def lowerCamelCase ( self : Any , snake_case_ : List[str] ): snake_case__ : List[str] = """ """.join(snake_case_ ).replace("""@@ """ , """""" ).strip() return out_string def lowerCamelCase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ): if not os.path.isdir(snake_case_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return snake_case__ : Union[str, Any] = os.path.join( snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case__ : List[Any] = os.path.join( snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + """\n""" ) snake_case__ : Optional[Any] = 0 with open(snake_case_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case_ : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." 
""" Please check that the tokenizer is not corrupted!""" ) snake_case__ : int = token_index writer.write(""" """.join(snake_case_ ) + """\n""" ) index += 1 return vocab_file, merge_file
35
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def lowercase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' if "model" in orig_key: lowerCamelCase : Dict = orig_key.replace("model." , "" ) if "norm1" in orig_key: lowerCamelCase : Union[str, Any] = orig_key.replace("norm1" , "attention.output.LayerNorm" ) if "norm2" in orig_key: lowerCamelCase : Union[str, Any] = orig_key.replace("norm2" , "output.LayerNorm" ) if "norm" in orig_key: lowerCamelCase : Optional[Any] = orig_key.replace("norm" , "LayerNorm" ) if "transformer" in orig_key: lowerCamelCase : int = orig_key.split("." )[0].split("_" )[-1] lowerCamelCase : Dict = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" ) if "mha.attn" in orig_key: lowerCamelCase : List[str] = orig_key.replace("mha.attn" , "attention.self" ) if "mha" in orig_key: lowerCamelCase : List[Any] = orig_key.replace("mha" , "attention" ) if "W_q" in orig_key: lowerCamelCase : Optional[int] = orig_key.replace("W_q" , "self.query" ) if "W_k" in orig_key: lowerCamelCase : List[Any] = orig_key.replace("W_k" , "self.key" ) if "W_v" in orig_key: lowerCamelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" ) if "ff1" in orig_key: lowerCamelCase : Union[str, Any] = orig_key.replace("ff1" , "intermediate.dense" ) if "ff2" in orig_key: lowerCamelCase : Optional[int] = orig_key.replace("ff2" , "output.dense" ) if "ff" in orig_key: lowerCamelCase : Optional[int] = orig_key.replace("ff" , "output.dense" ) if "mlm_class" in orig_key: lowerCamelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" ) if "mlm" in orig_key: lowerCamelCase : List[Any] = orig_key.replace("mlm" , "cls.predictions.transform" ) if "cls" not in orig_key: lowerCamelCase : int = "yoso." 
+ orig_key return orig_key def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCamelCase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if ("pooler" in key) or ("sen_class" in key): continue else: lowerCamelCase : Dict = val lowerCamelCase : Dict = orig_state_dict["cls.predictions.decoder.bias"] lowerCamelCase : Dict = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2 return orig_state_dict def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : List[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model_state_dict"] lowerCamelCase : List[str] = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : Any = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ ) lowerCamelCase : List[Any] = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ ) print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) ) model.eval() model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for YOSO model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _snake_case = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
283
0
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _snake_case = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( a , unittest.TestCase): lowerCamelCase__ = XLMRobertaTokenizer lowerCamelCase__ = XLMRobertaTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True def snake_case__ ( self): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase : int = XLMRobertaTokenizer(__a, keep_accents=__a) tokenizer.save_pretrained(self.tmpdirname) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = "<pad>" _lowerCAmelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a), __a) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a), __a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<s>") self.assertEqual(vocab_keys[1], "<pad>") self.assertEqual(vocab_keys[-1], "<mask>") self.assertEqual(len(__a), 1002) def snake_case__ ( self): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size, 1002) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = XLMRobertaTokenizer(__a, keep_accents=__a) _lowerCAmelCase : List[Any] = tokenizer.tokenize("This is a test") self.assertListEqual(__a, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) _lowerCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") 
self.assertListEqual( __a, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) _lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(__a) self.assertListEqual( __a, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) _lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(__a) self.assertListEqual( __a, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) def snake_case__ ( self): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCAmelCase : str = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): _lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a, **__a) _lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(__a, **__a) _lowerCAmelCase : Any = tempfile.mkdtemp() _lowerCAmelCase : Any = tokenizer_r.save_pretrained(__a) _lowerCAmelCase : Optional[int] = tokenizer_p.save_pretrained(__a) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) _lowerCAmelCase : int = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(__a, 
__a) # Checks everything loads correctly in the same way _lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(__a) _lowerCAmelCase : Tuple = tokenizer_p.from_pretrained(__a) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a, __a)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__a) # Save tokenizer rust, legacy_format=True _lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(__a, legacy_format=__a) _lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(__a) # Checks it save with the same files self.assertSequenceEqual(__a, __a) # Checks everything loads correctly in the same way _lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(__a) _lowerCAmelCase : Dict = tokenizer_p.from_pretrained(__a) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a, __a)) shutil.rmtree(__a) # Save tokenizer rust, legacy_format=False _lowerCAmelCase : List[Any] = tempfile.mkdtemp() _lowerCAmelCase : Any = tokenizer_r.save_pretrained(__a, legacy_format=__a) _lowerCAmelCase : int = tokenizer_p.save_pretrained(__a) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way _lowerCAmelCase : Tuple = tokenizer_r.from_pretrained(__a) _lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(__a) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a, __a)) shutil.rmtree(__a) @cached_property def snake_case__ ( self): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base") def snake_case__ ( self): '''simple 
docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__a, f.name) _lowerCAmelCase : Dict = XLMRobertaTokenizer(f.name, keep_accents=__a) _lowerCAmelCase : Optional[int] = pickle.dumps(__a) pickle.loads(__a) def snake_case__ ( self): '''simple docstring''' if not self.test_rust_tokenizer: return _lowerCAmelCase : Any = self.get_tokenizer() _lowerCAmelCase : int = self.get_rust_tokenizer() _lowerCAmelCase : Tuple = "I was born in 92000, and this is falsé." _lowerCAmelCase : Any = tokenizer.tokenize(__a) _lowerCAmelCase : List[str] = rust_tokenizer.tokenize(__a) self.assertListEqual(__a, __a) _lowerCAmelCase : Tuple = tokenizer.encode(__a, add_special_tokens=__a) _lowerCAmelCase : Dict = rust_tokenizer.encode(__a, add_special_tokens=__a) self.assertListEqual(__a, __a) _lowerCAmelCase : List[str] = self.get_rust_tokenizer() _lowerCAmelCase : List[Any] = tokenizer.encode(__a) _lowerCAmelCase : Optional[int] = rust_tokenizer.encode(__a) self.assertListEqual(__a, __a) @slow def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[str] = "Hello World!" _lowerCAmelCase : int = [0, 3_5378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__a, self.big_tokenizer.encode(__a)) @slow def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) _lowerCAmelCase : Tuple = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 17_9459, 12_4850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 1_0114, 711, 152, 20, 6, 5, 2_2376, 642, 1221, 1_5190, 3_4153, 450, 5608, 959, 1119, 5_7702, 136, 186, 47, 1098, 2_9367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 5_0901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__a, self.big_tokenizer.encode(__a)) @slow def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3", )
36
import torch

from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual CLIP text encoder (M-CLIP).

    Extends the XLM-R config with the dimensions of the linear projection that
    maps transformer embeddings into the CLIP image-embedding space.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # in-features of the projection (transformer hidden size)
        self.transformerDimensions = transformerDimSize
        # out-features of the projection (CLIP image-embedding size)
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """XLM-R text encoder followed by a linear projection into CLIP space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected mean-pooled embedding, raw token embeddings).

        The mean pool is masked: padding positions contribute nothing, and the
        sum is divided by the number of real tokens per sequence.
        """
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
283
0
"""LXMERT model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    """Configuration for an LXMERT model.

    Holds the text-encoder hyperparameters (BERT-like), the cross-modality
    stack sizes (language / cross-encoder / vision layer counts), the visual
    feature dimensions, and the pre-training task switches.
    """

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        # --- text encoder (BERT-like) ---
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # --- label spaces for the pre-training heads ---
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        # --- encoder stack sizes ---
        self.l_layers = l_layers  # language-only layers
        self.x_layers = x_layers  # cross-modality layers
        self.r_layers = r_layers  # vision (relation) layers
        # --- visual inputs ---
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        # --- pre-training task switches ---
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # num_hidden_layers is a dict here (one count per sub-encoder), unlike
        # most configs where it is a single int.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
37
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = tempfile.mkdtemp() # fmt: off lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) ) lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"} lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__A ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__A ) ) lowerCamelCase : str = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } lowerCamelCase : str = os.path.join(self.tmpdirname , __A ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(__A , __A ) def _snake_case ( self , **__A ): """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" 
, **__A ) def _snake_case ( self , **__A ): """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A ) def _snake_case ( self , **__A ): """simple docstring""" return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A ) def _snake_case ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[Any] = self.get_tokenizer() lowerCamelCase : Optional[Any] = self.get_rust_tokenizer() lowerCamelCase : Tuple = self.get_image_processor() lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A ) processor_slow.save_pretrained(self.tmpdirname ) lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A ) lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A ) processor_fast.save_pretrained(self.tmpdirname ) lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __A ) self.assertIsInstance(processor_fast.tokenizer , __A ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __A ) self.assertIsInstance(processor_fast.image_processor , __A ) def _snake_case ( self ): """simple docstring""" 
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A ) lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[Any] = self.get_image_processor() lowerCamelCase : Optional[int] = self.get_tokenizer() lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A ) lowerCamelCase : Tuple = self.prepare_image_inputs() lowerCamelCase : int = image_processor(__A , return_tensors="np" ) lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.get_image_processor() lowerCamelCase : Dict = self.get_tokenizer() lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A ) lowerCamelCase : Tuple = "lower newer" lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" ) lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Any = self.get_image_processor() lowerCamelCase 
: Any = self.get_tokenizer() lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A ) lowerCamelCase : Optional[Any] = "lower newer" lowerCamelCase : Dict = self.prepare_image_inputs() lowerCamelCase : Any = processor(text=__A , images=__A ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__A ): processor() def _snake_case ( self ): """simple docstring""" lowerCamelCase : Any = "google/owlvit-base-patch32" lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A ) lowerCamelCase : Tuple = ["cat", "nasa badge"] lowerCamelCase : str = processor(text=__A ) lowerCamelCase : Union[str, Any] = 16 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__A ): processor() def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = "google/owlvit-base-patch32" lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A ) lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]] lowerCamelCase : int = processor(text=__A ) lowerCamelCase : Tuple = 16 lowerCamelCase : Any = len(__A ) lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__A ): processor() def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = "google/owlvit-base-patch32" lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A ) lowerCamelCase : List[Any] = ["cat", "nasa badge"] lowerCamelCase : Optional[Any] = processor(text=__A ) lowerCamelCase : int = 16 lowerCamelCase : List[str] = inputs["input_ids"] lowerCamelCase : int = [ 
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Any = self.get_image_processor() lowerCamelCase : List[str] = self.get_tokenizer() lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A ) lowerCamelCase : Dict = self.prepare_image_inputs() lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() lowerCamelCase : Any = processor(images=__A , query_images=__A ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__A ): processor() def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[Any] = self.get_image_processor() lowerCamelCase : Optional[int] = self.get_tokenizer() lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A ) lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase : List[Any] = processor.batch_decode(__A ) lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A ) self.assertListEqual(__A , __A )
283
0
def is_balanced(s: str) -> bool:
    """Return True if every bracket in *s* is closed in the correct order.

    Non-bracket characters are ignored. Uses a stack: opening brackets are
    pushed; a closing bracket must match the most recent opening one.
    """
    stack: list[str] = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for ch in s:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets and (
            # closing with nothing open, or closing the wrong kind, is unbalanced
            not stack or open_to_closed[stack.pop()] != ch
        ):
            return False

    # any unclosed opener left on the stack means the string is unbalanced
    return len(stack) == 0


def main() -> None:
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
38
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for ``BioGptTokenizer`` using a tiny in-memory BPE vocabulary."""

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # Input and expected detokenized output are identical for this fixture.
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """'lower' must BPE-split into ['low', 'er</w>'] and map to the fixture ids."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        # ids from the fixture vocab: low=14, er</w>=15, <unk>=20
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """BioGPT prepends the BOS id (2) to single sequences and to each pair member."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
283
0
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Dataset reader backed by a PySpark DataFrame.

    Wraps the ``Spark`` builder so a DataFrame can be materialized either as a
    streaming dataset or as a cached on-disk dataset.
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        # Whether a previously prepared cache may be reused (False forces redownload).
        self._load_from_cache_file = load_from_cache_file
        # On-disk format passed through to download_and_prepare ("arrow" or "parquet").
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return the dataset: streaming if requested, otherwise prepared on disk."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
39
def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the length of the smallest repunit (111...1)
    divisible by *divisor*, or 0 if none exists.

    A repunit R(k) is divisible by n only when gcd(n, 10) == 1, so divisors
    of 2 or 5 are rejected immediately with 0.
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1  # R(k) mod divisor, starting with R(1) = 1
    repunit_index = 1
    while repunit:
        # R(k+1) = 10 * R(k) + 1, tracked modulo divisor to stay small
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Project Euler 129: the least n with gcd(n, 10) == 1 and A(n) > limit.

    Since A(n) <= n, the search can start just below the limit; only odd
    candidates are tried (even n never divides a repunit).
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
283
0
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
"""Convert DiT checkpoints from the unilm repository to Hugging Face BEiT format.

NOTE(review): the original chunk had every local variable renamed to a single
identifier and the state-dict target keys stripped; names and keys below are
reconstructed from usage and from the BEiT parameter layout.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_key, new_key) pairs mapping original DiT weights to BEiT names."""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate q/k/v tensors."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used as a smoke-test input."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a DiT checkpoint's weights to the HF BEiT structure,
    sanity-check the output shape, and save (optionally push) the result."""
    # rvlcdip checkpoints are classifiers; the others carry the MIM (LM) head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
283
0
"""Tests for the PhoBERT tokenizer.

NOTE(review): the original chunk made the class inherit from itself and gave
every method the same clobbered name; the mixin base and method names are
restored from the TokenizerTesterMixin contract.
"""
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        # PhoBERT vocab files are "<token> <id>" lines; merges are raw BPE rules.
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
41
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a RoBERTa tokenizer into a
    single processor that produces text and image (pixel_values/pixel_mask)
    features in one call.

    NOTE(review): the original chunk declared every ``__call__`` parameter with
    one duplicated name (a SyntaxError); the parameter list below is restored
    from the keyword arguments forwarded to the tokenizer.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and process `images`, merging both into one encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
283
0
"""Tests for SamProcessor (PyTorch, TensorFlow, and PT/TF equivalence).

NOTE(review): the original chunk gave every method in each TestCase the same
clobbered name, so setUp/tearDown and all but one test were lost; method names
below are restored from what each body does.
"""
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    """SamProcessor behavior with PyTorch tensors."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moveaxis puts channels last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    """SamProcessor behavior with TensorFlow tensors."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moveaxis puts channels last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    """Cross-framework (PT vs TF) equivalence of SamProcessor outputs."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moveaxis puts channels last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
42
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort `unsorted` in place with bidirectional bubble sort and return it.

    NOTE(review): the original chunk passed the list itself to `range(...)`
    (a TypeError); the pass bounds are restored to the shrinking index `i`.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # Backward pass: bubble smaller elements toward the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True

        # Forward pass: bubble larger elements toward position i.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True

        # No swap in either direction means the list is already sorted.
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
283
0
"""Export the VAE decoder of a Stable Diffusion checkpoint to ONNX.

NOTE(review): the original chunk declared duplicate parameter names (a
SyntaxError) and passed a nonexistent ``args.fpaa`` at the call site; both
are repaired below.
"""
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`.

    PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format`
    arguments in v1.11, so the torch version is checked for backwards
    compatibility.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Load the checkpoint's VAE and export its decoder half to ONNX."""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode

    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
43
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)

# Make CUDA/cuDNN deterministic so pipeline outputs are reproducible across runs.
enable_full_determinism()


class UpperCAmelCase_(UpperCamelCase, UpperCamelCase, UpperCamelCase, unittest.TestCase):
    """Fast (tiny-component) tests for StableUnCLIPPipeline.

    NOTE(review): identifiers in this file look mechanically renamed -- every
    class attribute is `__A`, every method is `_snake_case`, and every local is
    `lowerCamelCase`, so later references (`embedder_hidden_size`,
    `embedder_projection_dim`, `prior_tokenizer`, `components`, `generator`,
    `inputs`, `pipe`, ...) are unresolved as written. Flagging rather than
    guessing the intended names.
    """

    __A : Tuple = StableUnCLIPPipeline
    __A : Optional[int] = TEXT_TO_IMAGE_PARAMS
    __A : str = TEXT_TO_IMAGE_BATCH_PARAMS
    __A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
    __A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    __A : Union[str, Any] = False

    def _snake_case(self):
        """Build the dict of tiny prior/noising/denoising components for the fast tests."""
        lowerCamelCase : List[str] = 32
        lowerCamelCase : Dict = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        lowerCamelCase : Optional[int] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=__A,
                projection_dim=__A,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        lowerCamelCase : List[Any] = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=__A,
            num_layers=1,
        )

        torch.manual_seed(0)
        lowerCamelCase : Dict = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=__A,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        lowerCamelCase : Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=__A)
        lowerCamelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        lowerCamelCase : str = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=__A,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        lowerCamelCase : Any = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # class embedding holds concat of noise-level and image embeddings, hence * 2
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=__A,
            layers_per_block=1,
            upcast_attention=__A,
            use_linear_projection=__A,
        )

        torch.manual_seed(0)
        lowerCamelCase : int = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=__A,
            steps_offset=1,
        )

        torch.manual_seed(0)
        lowerCamelCase : Optional[Any] = AutoencoderKL()

        lowerCamelCase : Optional[int] = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def _snake_case(self, __A, __A=0):
        """Return deterministic call kwargs (seeded generator, 2 inference steps).

        NOTE(review): the duplicate parameter names (`__A`, `__A`) are a
        SyntaxError as written.
        """
        if str(__A).startswith("mps"):
            # MPS does not support device-local generators; seed the global RNG instead.
            lowerCamelCase : Optional[int] = torch.manual_seed(__A)
        else:
            lowerCamelCase : Optional[Any] = torch.Generator(device=__A).manual_seed(__A)
        lowerCamelCase : Tuple = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def _snake_case(self):
        """Attention-slicing smoke test; exact-output comparison only on CPU."""
        lowerCamelCase : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=__A)

    def _snake_case(self):
        """Batched-vs-single inference consistency; exact comparison only on cpu/mps."""
        lowerCamelCase : List[Any] = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=__A)


@slow
@require_torch_gpu
class UpperCAmelCase_(unittest.TestCase):
    """Slow GPU integration tests against the released stable-unclip-2-1-l checkpoint."""

    def _snake_case(self):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case(self):
        """Compare a 768x768 fp16 sample against a stored reference numpy image."""
        lowerCamelCase : Optional[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        lowerCamelCase : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.floataa)
        pipe.to(__A)
        pipe.set_progress_bar_config(disable=__A)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowerCamelCase : List[Any] = torch.Generator(device="cpu").manual_seed(0)
        # NOTE(review): "anime turle" is presumably a typo for "anime turtle", but the
        # reference image was generated with this exact prompt -- confirm before changing.
        lowerCamelCase : Dict = pipe("anime turle", generator=__A, output_type="np")

        lowerCamelCase : Dict = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(__A, __A)

    def _snake_case(self):
        """Check peak GPU memory stays under 7 GB with slicing + sequential offload."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        lowerCamelCase : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.floataa)
        lowerCamelCase : Union[str, Any] = pipe.to(__A)
        pipe.set_progress_bar_config(disable=__A)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowerCamelCase : Any = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        lowerCamelCase : List[str] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
283
0
"""simple docstring""" import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : str = TransfoXLTokenizer _UpperCamelCase : List[str] = False _UpperCamelCase : int = False def __A ( self ): super().setUp() _lowerCAmelCase : int = [ """<unk>""", """[CLS]""", """[SEP]""", """want""", """unwanted""", """wa""", """un""", """running""", """,""", """low""", """l""", ] _lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def __A ( self , **a__ ): _lowerCAmelCase : Dict = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **a__ ) def __A ( self , a__ ): _lowerCAmelCase : Dict = """<unk> UNwanted , running""" _lowerCAmelCase : List[str] = """<unk> unwanted, running""" return input_text, output_text def __A ( self ): _lowerCAmelCase : List[str] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=a__ ) _lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("""<unk> UNwanted , running""" ) self.assertListEqual(a__ , ["""<unk>""", """unwanted""", """,""", """running"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [0, 4, 8, 7] ) def __A ( self ): _lowerCAmelCase : Dict = TransfoXLTokenizer(lower_case=a__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) def __A ( self ): _lowerCAmelCase : Any = TransfoXLTokenizer(lower_case=a__ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? 
""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def __A ( self ): _lowerCAmelCase : Optional[int] = TransfoXLTokenizer(lower_case=a__ ) _lowerCAmelCase : Dict = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?""" _lowerCAmelCase : Tuple = [ """Hello""", """(""", """bracket""", """)""", """and""", """side""", """@-@""", """scrolled""", """[""", """and""", """]""", """Henry""", """'s""", """$""", """5""", """@,@""", """000""", """with""", """3""", """@.@""", """34""", """m""", """.""", """What""", """'s""", """up""", """!""", """?""", ] self.assertListEqual(tokenizer.tokenize(a__ ) , a__ ) self.assertEqual(tokenizer.convert_tokens_to_string(a__ ) , a__ ) def __A ( self ): _lowerCAmelCase : str = self.get_tokenizer() _lowerCAmelCase : Optional[int] = len(a__ ) tokenizer.add_tokens(["""new1""", """new2"""] ) tokenizer.move_added_token("""new1""" , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(a__ ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("""new1""" ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , """new1""" )
44
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _snake_case = { '''configuration_squeezebert''': [ '''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SqueezeBertConfig''', '''SqueezeBertOnnxConfig''', ], '''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''SqueezeBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SqueezeBertForMaskedLM''', '''SqueezeBertForMultipleChoice''', '''SqueezeBertForQuestionAnswering''', '''SqueezeBertForSequenceClassification''', '''SqueezeBertForTokenClassification''', '''SqueezeBertModel''', '''SqueezeBertModule''', '''SqueezeBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name lowercase_ = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... 
).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : Union[PIL.Image.Image, np.ndarray] class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , _a , _a , _a , _a , _a , ): super().__init__() self.register_modules( prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , ) def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ): if latents is None: __a = randn_tensor(_a , generator=_a , device=_a , dtype=_a ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) __a = latents.to(_a ) __a = latents * scheduler.init_noise_sigma return latents def __UpperCAmelCase ( self , _a=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) __a = torch.device(f'''cuda:{gpu_id}''' ) __a = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a , _a ) @property def __UpperCAmelCase ( self ): if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(_a , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __UpperCAmelCase ( self , _a , _a , _a , _a , ): if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ): __a = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 ) if not isinstance(_a , torch.Tensor ): __a = self.image_processor(_a , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) __a = image.to(dtype=self.image_encoder.dtype , device=_a ) __a = 
self.image_encoder(_a )['''last_hidden_state'''] __a = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 __a = image_embeds.repeat_interleave(_a , dim=0 ) if do_classifier_free_guidance: __a = torch.zeros_like(_a ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __a = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(_a ) def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ): if isinstance(_a , PIL.Image.Image ): __a = 1 elif isinstance(_a , torch.Tensor ): __a = image.shape[0] elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): __a = len(_a ) else: raise ValueError( f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' ) __a = self._execution_device __a = batch_size * num_images_per_prompt __a = guidance_scale > 1.0 __a = self._encode_image(_a , _a , _a , _a ) # prior self.scheduler.set_timesteps(_a , device=_a ) __a = self.scheduler.timesteps __a = self.prior.config.num_embeddings __a = self.prior.config.embedding_dim __a = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim __a = latents.reshape(latents.shape[0] , _a , _a ) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance __a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __a = self.scheduler.scale_model_input(_a , _a ) __a = self.prior( _a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding # remove the variance __a , __a = 
noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: __a , __a = noise_pred.chunk(2 ) __a = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) __a = self.scheduler.step( _a , timestep=_a , sample=_a , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=_a ) __a = [] for i, latent in enumerate(_a ): print() __a = self.renderer.decode( latent[None, :] , _a , size=_a , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(_a ) __a = torch.stack(_a ) if output_type not in ["np", "pil"]: raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) __a = images.cpu().numpy() if output_type == "pil": __a = [self.numpy_to_pil(_a ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=_a )
45
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : str = "decision_transformer" __A : Union[str, Any] = ["past_key_values"] __A : Optional[int] = { "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , __A=17 , __A=4 , __A=128 , __A=4096 , __A=True , __A=1 , __A=1024 , __A=3 , __A=1 , __A=None , __A="relu" , __A=0.1 , __A=0.1 , __A=0.1 , __A=1e-5 , __A=0.02 , __A=True , __A=True , __A=5_0256 , __A=5_0256 , __A=False , __A=False , **__A , ): """simple docstring""" lowerCamelCase : List[str] = state_dim lowerCamelCase : Tuple = act_dim lowerCamelCase : List[str] = hidden_size lowerCamelCase : Optional[Any] = max_ep_len lowerCamelCase : Union[str, Any] = action_tanh lowerCamelCase : int = vocab_size lowerCamelCase : List[Any] = n_positions lowerCamelCase : Dict = n_layer lowerCamelCase : int = n_head lowerCamelCase : List[Any] = n_inner lowerCamelCase : Any = activation_function lowerCamelCase : Optional[int] = resid_pdrop lowerCamelCase : str = embd_pdrop lowerCamelCase : Tuple = attn_pdrop lowerCamelCase : List[Any] = layer_norm_epsilon lowerCamelCase : Dict = initializer_range lowerCamelCase : Optional[int] = scale_attn_weights lowerCamelCase : List[Any] = use_cache lowerCamelCase : Tuple = scale_attn_by_inverse_layer_idx lowerCamelCase : Optional[int] = reorder_and_upcast_attn lowerCamelCase : Dict = bos_token_id lowerCamelCase : Any = eos_token_id super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
283
0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # TODO: upload to AWS SCREAMING_SNAKE_CASE__ = { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json" ), } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'retribert' def __init__( self , lowercase=30_522 , lowercase=768 , lowercase=8 , lowercase=12 , lowercase=3_072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=True , lowercase=128 , lowercase=0 , **lowercase , ) -> str: super().__init__(pad_token_id=lowercase , **lowercase ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = share_encoders lowerCAmelCase = projection_dim
46
def lowercase_( SCREAMING_SNAKE_CASE_ = 4000000 ): '''simple docstring''' lowerCamelCase : Any = [0, 1] lowerCamelCase : Union[str, Any] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 lowerCamelCase : Union[str, Any] = 0 for j in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f'''{solution() = }''')
283
0
'''simple docstring''' def _lowerCAmelCase ( _UpperCamelCase : list ) -> list: """simple docstring""" for i in range(len(_UpperCamelCase ) - 1 , 0 , -1 ): _SCREAMING_SNAKE_CASE =False for j in range(_UpperCamelCase , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =unsorted[j - 1], unsorted[j] _SCREAMING_SNAKE_CASE =True for j in range(_UpperCamelCase ): if unsorted[j] > unsorted[j + 1]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =unsorted[j + 1], unsorted[j] _SCREAMING_SNAKE_CASE =True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase : Tuple = input("Enter numbers separated by a comma:\n").strip() lowerCamelCase : Dict = [int(item) for item in user_input.split(",")] print(f'''{cocktail_shaker_sort(unsorted) = }''')
47
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _snake_case = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
0
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""", """False""")) is not True,
    reason="""Skipping test because should only be run when releasing minor transformers version""",
)
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue_model_parallelism.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
    ]
)
class UpperCamelCase__(unittest.TestCase):
    """SageMaker smdistributed model-parallelism integration tests.

    Only runs when TEST_SAGEMAKER=True; each parameterized case trains a
    roberta-large GLUE job and asserts runtime/accuracy/loss KPIs.

    NOTE(review): identifiers look mechanically renamed -- all methods are
    `_lowercase`, locals are `lowerCamelCase`, and references such as
    `smp_options`, `mpi_options`, `instance_count`, `estimator`,
    `result_metrics_df`, `train_runtime` are unresolved as written. Return
    annotations (`List[str]`, ...) also lack a `typing` import.
    """

    def _lowercase(self) -> List[str]:
        # Per-case setup: copy the example training script into the test workspace.
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split(),
                encoding="utf-8",
                check=UpperCamelCase__,
            )
        assert hasattr(self, "env")

    def _lowercase(self, UpperCamelCase__) -> Dict:
        # configuration for running training on smdistributed Model Parallel
        lowerCamelCase : Any = {
            "enabled": True,
            "processes_per_host": 8,
        }
        lowerCamelCase : Union[str, Any] = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        lowerCamelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        lowerCamelCase : Tuple = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''',
            instance_count=UpperCamelCase__,
            instance_type=self.instance_type,
            debugger_hook_config=UpperCamelCase__,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=UpperCamelCase__,
            py_version="py36",
        )

    def _lowercase(self, UpperCamelCase__) -> Optional[Any]:
        # Export a finished job's CloudWatch metrics to CSV for inspection.
        TrainingJobAnalytics(UpperCamelCase__).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''')

    @parameterized.expand([(1,)])
    def _lowercase(self, UpperCamelCase__) -> Tuple:
        # create estimator
        lowerCamelCase : Optional[Any] = self.create_estimator(UpperCamelCase__)

        # run training
        estimator.fit()

        # result dataframe
        lowerCamelCase : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        lowerCamelCase : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])

        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        lowerCamelCase : Any = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_9999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''', "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCamelCase__)
48
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# Script: run Stable Diffusion inference on CPU with Intel Extension for PyTorch
# (channels-last memory format + ipex bf16 graph optimization).
#
# NOTE(review): every assignment below targets the single name `_snake_case`, so the
# later references (`parser`, `args`, `pipe`, `device`, `prompt`, `model_id`,
# `sample`, `timestep`, `encoder_hidden_status`, `input_example`, `seed`,
# `generator`, `generate_kwargs`, `image`, ...) are unresolved as written --
# mechanical renaming damage; flagging rather than guessing the intended names.

# CLI: optionally switch to the DPM-Solver scheduler and/or override step count.
_snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
_snake_case = parser.parse_args()

_snake_case = '''cpu'''
_snake_case = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''

_snake_case = '''path-to-your-trained-model'''
_snake_case = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    _snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_snake_case = pipe.to(device)

# to channels last
_snake_case = pipe.unet.to(memory_format=torch.channels_last)
_snake_case = pipe.vae.to(memory_format=torch.channels_last)
_snake_case = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    _snake_case = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
# Dummy UNet inputs (latents, timestep, text-encoder hidden states) used for tracing.
_snake_case = torch.randn(2, 4, 64, 64)
_snake_case = torch.rand(1) * 9_99
_snake_case = torch.randn(2, 77, 7_68)
_snake_case = (sample, timestep, encoder_hidden_status)
try:
    # Newer ipex versions accept a sample_input for tracing; fall back if unsupported.
    _snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    _snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    _snake_case = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)

# compute
_snake_case = 6_66
_snake_case = torch.Generator(device).manual_seed(seed)
_snake_case = {'''generator''': generator}
if args.steps is not None:
    _snake_case = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    _snake_case = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('''generated.png''')
283
0
import functools def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = len(_UpperCAmelCase ) __a = len(_UpperCAmelCase ) @functools.cache def min_distance(_UpperCAmelCase , _UpperCAmelCase ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa __a = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , _UpperCAmelCase ) , 1 + min_distance(_UpperCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
49
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __A , ): """simple docstring""" lowerCamelCase : str = parent lowerCamelCase : Union[str, Any] = 13 lowerCamelCase : Optional[Any] = 7 lowerCamelCase : List[str] = True lowerCamelCase : Optional[int] = True lowerCamelCase : Union[str, Any] = True lowerCamelCase : List[Any] = True lowerCamelCase : Tuple = True lowerCamelCase : Any = False lowerCamelCase : int = False lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = 99 lowerCamelCase : Tuple = 0 lowerCamelCase : Any = 32 lowerCamelCase : List[Any] = 2 lowerCamelCase : Tuple = 4 lowerCamelCase : List[str] = 0.1 lowerCamelCase : int = 0.1 lowerCamelCase : int = 512 lowerCamelCase : List[Any] = 16 lowerCamelCase : Any = 2 lowerCamelCase : Any = 0.02 lowerCamelCase : List[str] = 3 lowerCamelCase : Tuple = 4 lowerCamelCase : int = "last" lowerCamelCase : int = True lowerCamelCase : Dict = None lowerCamelCase : Tuple = 0 def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) lowerCamelCase : Tuple = None if 
self.use_input_lengths: lowerCamelCase : Optional[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase : str = None if self.use_token_type_ids: lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCamelCase : Dict = None lowerCamelCase : Dict = None lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A ) lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowerCamelCase : Dict = model(__A ) lowerCamelCase : Any = [input_ids, input_mask] lowerCamelCase : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A ) lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} lowerCamelCase : int = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A ) lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A ) lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A ) lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ): """simple docstring""" lowerCamelCase : Any = self.num_choices lowerCamelCase : Optional[Any] = 
TFFlaubertForMultipleChoice(config=__A ) lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase : Optional[int] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCamelCase : Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Optional[Any] = config_and_inputs lowerCamelCase : List[Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): '''simple docstring''' __A : str = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) __A : Dict = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __A : Any = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) __A : List[str] = False __A : 
List[str] = False def _snake_case ( self , __A , __A , __A , __A , __A ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = TFFlaubertModelTester(self ) lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A ) @slow def _snake_case ( self ): """simple docstring""" for model_name in 
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_tf @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) lowerCamelCase : str = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" lowerCamelCase : Dict = model(__A )[0] lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __A ) # compare the actual values for a slice. lowerCamelCase : Tuple = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
283
0
import os _UpperCAmelCase : int = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00} def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int: lowerCamelCase__ : List[str] = 0 lowerCamelCase__ : Optional[Any] = 0 while index < len(_UpperCAmelCase ) - 1: lowerCamelCase__ : Optional[int] = SYMBOLS[numerals[index]] lowerCamelCase__ : Optional[Any] = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str: lowerCamelCase__ : int = '' lowerCamelCase__ : Any = num // 1000 numerals += m_count * "M" num %= 1000 lowerCamelCase__ : str = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 lowerCamelCase__ : List[str] = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = "/p089_roman.txt" ) -> int: lowerCamelCase__ : Tuple = 0 with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea: lowerCamelCase__ : List[str] = filea.readlines() for line in lines: lowerCamelCase__ : Optional[int] = line.strip() lowerCamelCase__ : Union[str, Any] = parse_roman_numerals(_UpperCAmelCase ) lowerCamelCase__ : List[Any] = generate_roman_numerals(_UpperCAmelCase ) savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase ) return savings if __name__ == "__main__": print(F"""{solution() = }""")
50
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[] ): '''simple docstring''' lowerCamelCase : Optional[Any] = size[0] - overlap_pixels * 2 lowerCamelCase : int = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels lowerCamelCase : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 lowerCamelCase : List[Any] = np.pad(SCREAMING_SNAKE_CASE_ , mode="linear_ramp" , pad_width=SCREAMING_SNAKE_CASE_ , end_values=0 ) if "l" in remove_borders: lowerCamelCase : Optional[Any] = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: lowerCamelCase : List[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: lowerCamelCase : List[Any] = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: lowerCamelCase : Tuple = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return max(SCREAMING_SNAKE_CASE_ , min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Optional[Any] = list(SCREAMING_SNAKE_CASE_ ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap lowerCamelCase : Any = clamp_rect(SCREAMING_SNAKE_CASE_ , [0, 0] , [image_size[0], image_size[1]] ) return rect def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Dict = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(SCREAMING_SNAKE_CASE_ , (original_slice, 0) ) return result def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) lowerCamelCase : int = tile.crop(SCREAMING_SNAKE_CASE_ ) return tile def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' lowerCamelCase : int = n % d return n - divisor class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ): """simple docstring""" super().__init__( vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , ) def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , **__A ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase : Tuple = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) lowerCamelCase : Union[str, Any] = add_overlap_rect(__A , __A , image.size ) lowerCamelCase : List[str] = image.crop(__A ) lowerCamelCase : Optional[int] = ((crop_rect[0] + 
((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] lowerCamelCase : int = translated_slice_x - (original_image_slice / 2) lowerCamelCase : Optional[Any] = max(0 , __A ) lowerCamelCase : Tuple = squeeze_tile(__A , __A , __A , __A ) lowerCamelCase : Dict = to_input.size lowerCamelCase : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) lowerCamelCase : Dict = super(__A , self ).__call__(image=__A , **__A ).images[0] lowerCamelCase : Tuple = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) lowerCamelCase : Optional[Any] = unsqueeze_tile(__A , __A ) lowerCamelCase : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) lowerCamelCase : int = [] if x == 0: remove_borders.append("l" ) elif crop_rect[2] == image.size[0]: remove_borders.append("r" ) if y == 0: remove_borders.append("t" ) elif crop_rect[3] == image.size[1]: remove_borders.append("b" ) lowerCamelCase : int = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode="L" , ) final_image.paste( __A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A ) @torch.no_grad() def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ): """simple docstring""" lowerCamelCase : Dict = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) ) lowerCamelCase : Union[str, Any] = math.ceil(image.size[0] / tile_size ) lowerCamelCase : Dict = math.ceil(image.size[1] / tile_size ) lowerCamelCase : str = tcx * tcy lowerCamelCase : int = 0 for y in range(__A ): for x in range(__A ): self._process_tile( __A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , 
latents=__A , ) current_count += 1 if callback is not None: callback({"progress": current_count / total_tile_count, "image": final_image} ) return final_image def lowercase_( ): '''simple docstring''' lowerCamelCase : Dict = "stabilityai/stable-diffusion-x4-upscaler" lowerCamelCase : Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="fp16" , torch_dtype=torch.floataa ) lowerCamelCase : Optional[Any] = pipe.to("cuda" ) lowerCamelCase : List[str] = Image.open("../../docs/source/imgs/diffusers_library.jpg" ) def callback(SCREAMING_SNAKE_CASE_ ): print(f"""progress: {obj['progress']:.4f}""" ) obj["image"].save("diffusers_library_progress.jpg" ) lowerCamelCase : int = pipe(image=SCREAMING_SNAKE_CASE_ , prompt="Black font, white background, vector" , noise_level=40 , callback=SCREAMING_SNAKE_CASE_ ) final_image.save("diffusers_library.jpg" ) if __name__ == "__main__": main()
283
0
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case_ : Optional[Any] = 16 snake_case_ : Optional[int] = 32 def A (__A : Accelerator , __A : int = 16 ) -> List[str]: """simple docstring""" UpperCAmelCase_ = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCAmelCase_ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__A : List[Any] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__A , max_length=__A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase_ = datasets.map( __A , 
batched=__A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__A : int ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase_ = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase_ = 8 else: UpperCAmelCase_ = None return tokenizer.pad( __A , padding='''longest''' , max_length=__A , pad_to_multiple_of=__A , return_tensors='''pt''' , ) # Instantiate dataloaders. UpperCAmelCase_ = DataLoader( tokenized_datasets['''train'''] , shuffle=__A , collate_fn=__A , batch_size=__A ) UpperCAmelCase_ = DataLoader( tokenized_datasets['''validation'''] , shuffle=__A , collate_fn=__A , batch_size=__A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case_ : Tuple = mocked_dataloaders # noqa: F811 def A (__A : Optional[int] , __A : Optional[int] ) -> int: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __A ) == "1": UpperCAmelCase_ = 2 # Initialize accelerator UpperCAmelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ = config['''lr'''] UpperCAmelCase_ = int(config['''num_epochs'''] ) UpperCAmelCase_ = int(config['''seed'''] ) UpperCAmelCase_ = int(config['''batch_size'''] ) UpperCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. 
It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__A ) def inner_training_loop(__A : List[str] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase_ = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase_ = AdamW(params=model.parameters() , lr=__A ) UpperCAmelCase_ , UpperCAmelCase_ = get_dataloaders(__A , __A ) # Instantiate scheduler UpperCAmelCase_ = get_linear_schedule_with_warmup( optimizer=__A , num_warmup_steps=100 , num_training_steps=(len(__A ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare( __A , __A , __A , __A , __A ) # Now we train the model for epoch in range(__A ): model.train() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) UpperCAmelCase_ = model(**__A ) UpperCAmelCase_ = outputs.loss accelerator.backward(__A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ = model(**__A ) UpperCAmelCase_ = outputs.logits.argmax(dim=-1 ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__A , references=__A , ) UpperCAmelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __A ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def A () -> str: """simple docstring""" UpperCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__A , default=__A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__A , __A ) if __name__ == "__main__": main()
51
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Dict = "mobilenet_v2" def __init__( self , __A=3 , __A=224 , __A=1.0 , __A=8 , __A=8 , __A=6 , __A=32 , __A=True , __A=True , __A="relu6" , __A=True , __A=0.8 , __A=0.02 , __A=0.001 , __A=255 , **__A , ): """simple docstring""" super().__init__(**__A ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." 
) lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Union[str, Any] = depth_multiplier lowerCamelCase : Tuple = depth_divisible_by lowerCamelCase : Dict = min_depth lowerCamelCase : Dict = expand_ratio lowerCamelCase : Optional[Any] = output_stride lowerCamelCase : int = first_layer_is_expansion lowerCamelCase : Union[str, Any] = finegrained_output lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : Optional[Any] = tf_padding lowerCamelCase : Optional[Any] = classifier_dropout_prob lowerCamelCase : Dict = initializer_range lowerCamelCase : str = layer_norm_eps lowerCamelCase : Optional[Any] = semantic_loss_ignore_index class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Union[str, Any] = version.parse("1.11" ) @property def _snake_case ( self ): """simple docstring""" return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _snake_case ( self ): """simple docstring""" if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _snake_case ( self ): """simple docstring""" return 1e-4
283
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : List[str] = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Any = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
"""Lorentz transformation of a four-vector (special relativity)."""
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s).
# NOTE: the obfuscated original bound this and the symbols below to the
# single name `_snake_case`, leaving `c`, `ct`, `x`, `y`, `z` (and the
# function names `beta`/`gamma`/`transformation_matrix`/`transform` used
# at every call site) undefined.  Names restored so the module runs.
c = 299_792_458

# Symbolic spacetime coordinates of the four-vector (ct, x, y, z).
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v/c for a speed *velocity* given in m/s.

    Raises:
        ValueError: if the speed exceeds c, or is below 1 m/s
            (speeds of interest are of the order of c).
    """
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float):
    """Return the 4x4 Lorentz boost matrix for a boost along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event=None):
    """Boost the four-vector *event* by *velocity* along x.

    When *event* is None, a symbolic four-vector (ct, x, y, z) is used.
    NOTE: when an array is supplied, its time component is scaled in place
    (``event[0] *= c``), mutating the caller's array — behavior preserved
    from the original implementation.
    """
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
283
0
# SentencePiece tokenizer for XGLM (facebook/xglm-564M), mirroring the
# original fairseq vocabulary: the first four ids (<s>/<pad>/</s>/<unk>)
# are pinned via `fairseq_tokens_to_ids`, every SentencePiece id is shifted
# by `fairseq_offset` (=1), and 7 "<madeupword{i}>" fillers pad the tail of
# the vocab.  Sequences are built as `</s> tokens` (and
# `</s> a </s> </s> b` for pairs); pickling drops the in-memory sp_model
# and keeps its serialized proto.
# NOTE(review): mechanical renaming damaged this copy — the class/constant
# names are placeholders, several methods reuse one parameter name for what
# upstream calls token_ids_0/token_ids_1, `encode(..., out_type=__A)` and
# `''.join(__A).replace(__A, ' ')` in the detokenizer pass the wrong
# object (upstream passes `str` and SPIECE_UNDERLINE respectively).
# Confirm against the upstream tokenization_xglm.py before use.
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a__ : List[str] =logging.get_logger(__name__) a__ : Any ='''▁''' a__ : Dict ={'''vocab_file''': '''sentencepiece.bpe.model'''} a__ : List[Any] ={ '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } a__ : List[Any] ={ '''facebook/xglm-564M''': 2_048, } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : List[Any] =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[Any] =["input_ids", "attention_mask"] def __init__( self : Dict , __A : List[Any] , __A : Dict="<s>" , __A : Any="</s>" , __A : Union[str, Any]="</s>" , __A : Optional[int]="<s>" , __A : Dict="<unk>" , __A : Optional[Any]="<pad>" , __A : Optional[Dict[str, Any]] = None , **__A : Union[str, Any] , ): __UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer __UpperCamelCase = 7 __UpperCamelCase = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )] __UpperCamelCase = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) __UpperCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | 
--- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCamelCase = 1 # Mimic fairseq token-to-id alignment for the first 4 token __UpperCamelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} __UpperCamelCase = len(self.sp_model ) __UpperCamelCase = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__A ) __UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Tuple ): __UpperCamelCase = self.__dict__.copy() __UpperCamelCase = None __UpperCamelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self : Any , __A : Optional[int] ): __UpperCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __UpperCamelCase = {} __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCamelCase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None ): if token_ids_a is None: return [self.sep_token_id] + token_ids_a __UpperCamelCase = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _lowerCamelCase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) def _lowerCamelCase ( self : Dict , __A : List[int] , __A : Optional[List[int]] = None ): __UpperCamelCase = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return 
len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def _lowerCamelCase ( self : Optional[int] ): return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self : List[Any] , __A : str ): return self.sp_model.encode(__A , out_type=__A ) def _lowerCamelCase ( self : int , __A : Any ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCamelCase = self.sp_model.PieceToId(__A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self : List[str] , __A : List[str] ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self : Dict , __A : int ): __UpperCamelCase = ''.join(__A ).replace(__A , ' ' ).strip() return out_string def _lowerCamelCase ( self : int , __A : str , __A : Optional[str] = None ): if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCamelCase = os.path.join( __A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , 'wb' ) as fi: __UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,)
53
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor

# Module logger (name mangled by the dataset's identifier renaming).
_snake_case = logging.get_logger(__name__)


class UpperCAmelCase_ ( UpperCamelCase ):
    '''Deprecated alias kept for backward compatibility.

    Upstream this is ``DPTFeatureExtractor(DPTImageProcessor)``: it warns on
    construction and otherwise behaves exactly like ``DPTImageProcessor``.
    NOTE(review): ``*__A, **__A`` reuses one parameter name — a SyntaxError
    in Python — and the second ``warnings.warn`` argument (``__A``) replaced
    what upstream passes as ``FutureWarning``; both look like mechanical
    renaming damage.  Confirm against the upstream file.
    '''

    def __init__( self , *__A , **__A ):
        """Emit the deprecation warning, then forward all arguments to the
        ``DPTImageProcessor`` constructor."""
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead." , __A , )
        super().__init__(*__A , **__A )
283
0
# Fast (tokenizers-backed) MobileBERT tokenizer.  The constructor re-syncs
# the backend normalizer when lowercase/strip_accents/Chinese-char options
# differ from the serialized tokenizer.json; the remaining methods build
# [CLS] a [SEP] (b [SEP]) inputs, 0/1 token-type ids, and save the vocab.
# NOTE(review): mechanical renaming collapsed upstream's distinct
# token_ids_0/token_ids_1 parameters into one name in
# `build_inputs_with_special_tokens` and the token-type-id method (both
# branches of the latter therefore reference the same variable) — the
# resulting masks cannot distinguish the pair's second segment as written.
# Confirm against upstream tokenization_mobilebert_fast.py.
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer a__ : int = logging.get_logger(__name__) a__ : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} a__ : List[Any] = { '''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''}, '''tokenizer_file''': { '''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json''' }, } a__ : Optional[Any] = {'''mobilebert-uncased''': 5_1_2} a__ : Any = {} class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = VOCAB_FILES_NAMES snake_case__ : str = PRETRAINED_VOCAB_FILES_MAP snake_case__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Dict = MobileBertTokenizer def __init__( self : Tuple , UpperCAmelCase__ : int=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]="[UNK]" , UpperCAmelCase__ : List[Any]="[SEP]" , UpperCAmelCase__ : Optional[Any]="[PAD]" , UpperCAmelCase__ : Tuple="[CLS]" , UpperCAmelCase__ : Tuple="[MASK]" , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) -> Tuple: super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , 
UpperCAmelCase__ ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase__ ) != tokenize_chinese_chars ): __SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , normalizer_state.pop("type" ) ) __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = strip_accents __SCREAMING_SNAKE_CASE = tokenize_chinese_chars __SCREAMING_SNAKE_CASE = normalizer_class(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = do_lower_case def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=None ) -> List[Any]: __SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: __SCREAMING_SNAKE_CASE = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ )
54
"""Update the docs' custom.js for a new release: bump the stable version
constant and append the release to the version-switcher mapping."""
import argparse

# Default location of the docs version-switcher script.
JS_PATH = "docs/source/_static/js/custom.js"


def update_custom_js(version, js_path=JS_PATH):
    """Rewrite *js_path* in place for release *version*.

    Fixes restored from the obfuscated original, which defined the function
    under a placeholder name while `__main__` called `update_custom_js`,
    opened the *version* argument as if it were the file path, and
    interpolated an undefined `version` variable.

    Args:
        version: release version string, without the leading "v".
        js_path: path of the JS file to edit (new optional parameter,
            defaults to the original hard-coded path).
    """
    with open(js_path, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end of the mapping object...
    while not lines[index].startswith("}"):
        index += 1
    # ...and add the new version just before the closing brace.
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(js_path, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
283
0
"""Scrape current share prices from Yahoo Finance India."""
import requests
from bs4 import BeautifulSoup  # fixed: original imported from nonexistent module "bsa"


def stock_price(symbol: str = "AAPL") -> str:
    """Return the current quoted price for *symbol* as displayed on
    in.finance.yahoo.com.

    Restored the function name: the obfuscated original defined
    `__snake_case` while `__main__` called `stock_price`, which was
    undefined.

    Raises:
        AttributeError: if Yahoo changed the page layout and the quote
            element is not found (`.find(...)` returns None).
    """
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # CSS class of the <div> wrapping the live quote in the mobile layout.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
55
# Auto-generated "dummy" placeholders used when the `flax` backend is not
# installed: instantiating any of these classes, or calling either of the
# two classmethods (upstream: `from_config` / `from_pretrained`), raises an
# informative error via `requires_backends(..., ["flax"])` instead of a
# bare ImportError at import time.
# NOTE(review): mechanical renaming damage — every class here carries the
# same placeholder name `UpperCAmelCase_` (each definition shadows the
# previous one; upstream gives each dummy its real model-class name), the
# metaclass should be `DummyObject` (imported above but never referenced),
# and every signature uses `*__A, **__A`, a duplicate parameter name,
# which is a SyntaxError.  Confirm against upstream
# utils/dummy_flax_objects.py before relying on this module.
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Any = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : str = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : List[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple 
docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Dict = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Dict = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Union[str, Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[int] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple 
docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Any = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : Optional[Any] = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase ): '''simple docstring''' __A : int = ["flax"] def __init__( self , *__A , **__A ): """simple docstring""" requires_backends(self , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] ) @classmethod def _snake_case ( cls , *__A , **__A ): """simple docstring""" requires_backends(cls , ["flax"] )
283
0
"""String-abbreviation check (dynamic programming)."""


def __magic_name__(source: str, target: str) -> bool:
    """Return True if *target* can be produced from *source* by capitalizing
    some of source's lowercase letters and deleting the remaining lowercase
    letters (uppercase letters in source must all be matched).

    Fix: the original declared ``def __magic_name__(__UpperCAmelCase,
    __UpperCAmelCase)`` — a duplicate parameter name, which is a
    SyntaxError; the parameters now have distinct names (positional call
    compatibility preserved).
    """
    n = len(source)
    m = len(target)
    # dp[i][j]: the first i chars of source can yield the first j of target.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize source[i] to match target[j].
                if j < m and source[i].upper() == target[j]:
                    dp[i + 1][j + 1] = True
                # Or drop source[i] — only lowercase letters may be deleted.
                if source[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
56
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( UpperCamelCase ): '''simple docstring''' __A : Dict = "openai/whisper-base" __A : str = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __A : Any = "transcriber" __A : Any = WhisperProcessor __A : int = WhisperForConditionalGeneration __A : Any = ["audio"] __A : List[str] = ["text"] def _snake_case ( self , __A ): """simple docstring""" return self.pre_processor(__A , return_tensors="pt" ).input_features def _snake_case ( self , __A ): """simple docstring""" return self.model.generate(inputs=__A ) def _snake_case ( self , __A ): """simple docstring""" return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0]
283
0
# Simple genetic algorithm that evolves a random population of strings
# toward a target string: score by per-position matches, keep the best
# third each generation, breed children by single-point crossover and
# per-character mutation.
# NOTE(review): mechanical renaming collapsed the five distinct functions
# (upstream: evaluate, crossover, mutate, select, basic) into repeated
# `_lowerCamelCase` definitions that shadow one another, while the bodies
# and `__main__` still call the original names (`evaluate`, `crossover`,
# `mutate`, `select`, `basic`) — as written this module raises NameError.
# The logic itself is order-sensitive (seeded `random` call sequence), so
# the code is documented here rather than rewritten; confirm names against
# the upstream basic_string.py before running.
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. A : Optional[Any] = 2_0_0 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. A : Any = 5_0 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. A : int = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_0_0_0)) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = len([g for position, g in enumerate(_UpperCamelCase ) if g == main_target[position]] ) return (item, float(_UpperCamelCase )) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = random.randint(0 , len(_UpperCamelCase ) - 1 ) __lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] __lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = list(_UpperCamelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: __lowerCAmelCase = random.choice(_UpperCamelCase ) return "".join(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): '''simple docstring''' __lowerCAmelCase = [] # Generate more children proportionally to the fitness score. __lowerCAmelCase = int(parent_a[1] * 100 ) + 1 __lowerCAmelCase = 10 if child_n >= 10 else child_n for _ in range(_UpperCamelCase ): __lowerCAmelCase = population_score[random.randint(0 , _UpperCamelCase )][0] __lowerCAmelCase , __lowerCAmelCase = crossover(parent_a[0] , _UpperCamelCase ) # Append new string to the population list. 
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) ) pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) ) return pop def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = True ): '''simple docstring''' if N_POPULATION < N_SELECTED: __lowerCAmelCase = f"{N_POPULATION} must be bigger than {N_SELECTED}" raise ValueError(_UpperCamelCase ) # Verify that the target contains no genes besides the ones inside genes variable. __lowerCAmelCase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __lowerCAmelCase = f"{not_in_genes_list} is not in genes list, evolution cannot converge" raise ValueError(_UpperCamelCase ) # Generate random starting population. __lowerCAmelCase = [] for _ in range(_UpperCamelCase ): population.append("".join([random.choice(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) )] ) ) # Just some logs to know what the algorithms is doing. __lowerCAmelCase , __lowerCAmelCase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_UpperCamelCase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __lowerCAmelCase = [evaluate(_UpperCamelCase , _UpperCamelCase ) for item in population] # Check if there is a matching evolution. 
__lowerCAmelCase = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x[1] , reverse=_UpperCamelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f"\nGeneration: {generation}" f"\nTotal Population:{total_population}" f"\nBest score: {population_score[0][1]}" f"\nBest string: {population_score[0][0]}" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __lowerCAmelCase = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_UpperCamelCase ) # Normalize population score to be between 0 and 1. __lowerCAmelCase = [ (item, score / len(_UpperCamelCase )) for item, score in population_score ] # This is selection for i in range(_UpperCamelCase ): population.extend(select(population_score[int(_UpperCamelCase )] , _UpperCamelCase , _UpperCamelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_UpperCamelCase ) > N_POPULATION: break if __name__ == "__main__": A : Union[str, Any] = ( "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!" ) A : Tuple = list( " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm" "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\" ) A , A , A : List[Any] = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
57
"""Convert an original YOSO checkpoint into the Hugging Face format.

Restores the three function names that the obfuscated original collapsed
into repeated `lowercase_` definitions (shadowing each other) while the
bodies and `__main__` still called `rename_key`-style helpers,
`convert_checkpoint_helper`, and `convert_yoso_checkpoint` — undefined as
written.
"""
import argparse

import torch
from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Translate one original YOSO parameter name to its HF equivalent."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        # "transformer_<n>..." -> "encoder.layer.<n>..."
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all tensors, drop pooler / sentence-classification heads, and
    rebuild derived entries (prediction bias alias, position ids)."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    # NOTE(review): these two assignment targets were destroyed by the
    # mechanical renaming in the original text; restored per the upstream
    # Transformers conversion script — confirm against it.
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the original checkpoint, convert its state dict, and save an HF
    `YosoForMaskedLM` at *pytorch_dump_path*."""
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    # load_state_dict's return value reports missing/unexpected keys.
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file", default=None, type=str, required=True, help="The json file for YOSO model config."
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
283
0
# Nightly integration test for the legacy ONNX Stable Diffusion inpainting
# pipeline: downloads a reference image/mask/expected-output, runs 15 PNDM
# inference steps on CUDA through onnxruntime, and asserts the result is
# within 1e-2 (max abs diff) of the stored reference.  Requires
# onnxruntime, a GPU, and network access — skipped otherwise via the
# decorators.
# NOTE(review): property/method names were mechanically renamed (the two
# properties are upstream's `gpu_provider` / `gpu_options`; `A` stands in
# for the original variable names and the `None`/`False` literals passed to
# safety_checker / set_progress_bar_config).  Confirm against the upstream
# diffusers test file.
'''simple docstring''' import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class a_ ( unittest.TestCase ): '''simple docstring''' @property def snake_case_( self ) -> Any: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case_( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = ort.SessionOptions() _SCREAMING_SNAKE_CASE = False return options def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) _SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) _SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default _SCREAMING_SNAKE_CASE = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=A ) _SCREAMING_SNAKE_CASE = """A red cat sitting on a park bench""" _SCREAMING_SNAKE_CASE = np.random.RandomState(0 ) _SCREAMING_SNAKE_CASE = pipe( prompt=A , image=A , mask_image=A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=A , output_type="""np""" , ) _SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (512, 512, 3) assert 
np.abs(expected_image - image ).max() < 1e-2
58
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual CLIP text encoder.

    Extends ``XLMRobertaConfig`` with the dimensions of the linear head that
    projects transformer embeddings into the CLIP image-embedding space.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # Store the head dimensions BEFORE super().__init__ so that they are
        # serialized with the config; the model below reads them back as
        # `config.transformerDimensions` / `config.numDims`.
        # (Fix: the previous version assigned these to throwaway locals, so the
        # attributes the model depends on were never set.)
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """XLM-R text encoder plus a linear projection into CLIP embedding space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected pooled embedding, per-token embeddings).

        The pooled embedding is a mask-weighted mean over the sequence axis,
        so padding tokens do not contribute.
        """
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mask out padding positions, then divide by the number of real tokens.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
283
0
from __future__ import annotations import queue class UpperCAmelCase : def __init__(self : Union[str, Any] , snake_case__ : int ) -> Optional[int]: '''simple docstring''' snake_case : int = data snake_case : Optional[Any] = None snake_case : Optional[Any] = None def UpperCamelCase ( ): print("\n********Press N to stop entering at any point of time********\n" ) snake_case : Union[str, Any] = input("Enter the value of the root node: " ).strip().lower() snake_case : queue.Queue = queue.Queue() snake_case : Union[str, Any] = TreeNode(int(__lowerCamelCase ) ) q.put(__lowerCamelCase ) while not q.empty(): snake_case : Optional[Any] = q.get() snake_case : Optional[int] = f"""Enter the left node of {node_found.data}: """ snake_case : Any = input(__lowerCamelCase ).strip().lower() or "n" if check == "n": return tree_node snake_case : int = TreeNode(int(__lowerCamelCase ) ) snake_case : Tuple = left_node q.put(__lowerCamelCase ) snake_case : Dict = f"""Enter the right node of {node_found.data}: """ snake_case : str = input(__lowerCamelCase ).strip().lower() or "n" if check == "n": return tree_node snake_case : Any = TreeNode(int(__lowerCamelCase ) ) snake_case : List[Any] = right_node q.put(__lowerCamelCase ) raise def UpperCamelCase ( __lowerCamelCase : TreeNode ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def UpperCamelCase ( __lowerCamelCase : TreeNode ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def UpperCamelCase ( __lowerCamelCase : TreeNode ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def UpperCamelCase ( __lowerCamelCase : TreeNode ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return snake_case : queue.Queue = queue.Queue() 
q.put(__lowerCamelCase ) while not q.empty(): snake_case : int = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def UpperCamelCase ( __lowerCamelCase : TreeNode ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return snake_case : queue.Queue = queue.Queue() q.put(__lowerCamelCase ) while not q.empty(): snake_case : Dict = [] while not q.empty(): snake_case : Optional[Any] = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__lowerCamelCase ) def UpperCamelCase ( __lowerCamelCase : TreeNode ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return snake_case : list[TreeNode] = [] snake_case : Tuple = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(__lowerCamelCase ) snake_case : Dict = n.left # end of while means current node doesn't have left child snake_case : Optional[int] = stack.pop() # start to traverse its right child snake_case : List[Any] = n.right def UpperCamelCase ( __lowerCamelCase : TreeNode ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return snake_case : list[TreeNode] = [] snake_case : Dict = node while n or stack: while n: stack.append(__lowerCamelCase ) snake_case : List[Any] = n.left snake_case : List[Any] = stack.pop() print(n.data , end="," ) snake_case : str = n.right def UpperCamelCase ( __lowerCamelCase : TreeNode ): if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not node: return snake_case , snake_case : List[Any] = [], [] snake_case : Any = node stacka.append(__lowerCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 snake_case : Optional[Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: 
stacka.append(n.right ) stacka.append(__lowerCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def UpperCamelCase ( __lowerCamelCase : str = "" , __lowerCamelCase : Tuple=50 , __lowerCamelCase : int="*" ): if not s: return "\n" + width * char snake_case , snake_case : Optional[Any] = divmod(width - len(__lowerCamelCase ) - 2 , 2 ) return f"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) __lowerCamelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
59
# Unit tests for the OwlViT processor (CLIP tokenizer + OwlViT image processor).
#
# NOTE(review): identifier names in this file look machine-obfuscated: every
# method is named `_snake_case` (so later definitions shadow earlier ones and
# helpers like `self.get_tokenizer()` cannot resolve), locals are all bound to
# `lowerCamelCase` and then read back under their original names
# (`self.tmpdirname`, `tokenizer`, `processor`, ...), and `__A` is used as a
# universal placeholder argument. Restore the original identifiers before
# running; comments below describe the apparent intent only.
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class UpperCAmelCase_(unittest.TestCase):
    '''Round-trip and equivalence tests for OwlViTProcessor save/load, text
    tokenization, and image preprocessing.'''

    def _snake_case(self):
        # setUp: write a tiny BPE vocab/merges file and an image-processor
        # config into a temp dir so processors can be loaded from disk.
        lowerCamelCase : str = tempfile.mkdtemp()

        # fmt: off
        lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        lowerCamelCase : List[Any] = dict(zip(__A, range(len(__A))))
        lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}

        lowerCamelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        lowerCamelCase : List[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(__A) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(__A))

        # Minimal image-processor config exercising resize/crop/normalize.
        lowerCamelCase : str = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        lowerCamelCase : str = os.path.join(self.tmpdirname, __A)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(__A, __A)

    def _snake_case(self, **__A):
        # Helper: slow CLIP tokenizer loaded from the temp dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **__A)

    def _snake_case(self, **__A):
        # Helper: fast (Rust) CLIP tokenizer loaded from the temp dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **__A)

    def _snake_case(self, **__A):
        # Helper: OwlViT image processor loaded from the temp dir.
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **__A)

    def _snake_case(self):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname)

    def _snake_case(self):
        # Helper: one random RGB PIL image.
        # NOTE(review): `np.uinta` should presumably be `np.uint8`, and the
        # comprehension should map over `x`, not the placeholder `__A`.
        lowerCamelCase : Dict = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A, 0, -1)) for x in image_inputs]
        return image_inputs

    def _snake_case(self):
        # save_pretrained/from_pretrained round-trips for both slow and fast
        # tokenizer variants preserve vocab and image-processor config.
        lowerCamelCase : List[Any] = self.get_tokenizer()
        lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
        lowerCamelCase : Tuple = self.get_image_processor()

        lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A, image_processor=__A)
        processor_slow.save_pretrained(self.tmpdirname)
        lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=__A)

        lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A, image_processor=__A)
        processor_fast.save_pretrained(self.tmpdirname)
        lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, __A)
        self.assertIsInstance(processor_fast.tokenizer, __A)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, __A)
        self.assertIsInstance(processor_fast.image_processor, __A)

    def _snake_case(self):
        # from_pretrained kwargs (new special tokens, do_normalize) override
        # the saved tokenizer / image-processor settings.
        lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A)

        lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__A
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, __A)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, __A)

    def _snake_case(self):
        # Processor(images=...) matches calling the image processor directly.
        lowerCamelCase : List[Any] = self.get_image_processor()
        lowerCamelCase : Optional[int] = self.get_tokenizer()

        lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A, image_processor=__A)

        lowerCamelCase : Tuple = self.prepare_image_inputs()

        lowerCamelCase : int = image_processor(__A, return_tensors="np")
        lowerCamelCase : Union[str, Any] = processor(images=__A, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def _snake_case(self):
        # Processor(text=...) matches calling the tokenizer directly.
        lowerCamelCase : Union[str, Any] = self.get_image_processor()
        lowerCamelCase : Dict = self.get_tokenizer()

        lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A, image_processor=__A)

        lowerCamelCase : Tuple = "lower newer"

        lowerCamelCase : Union[str, Any] = processor(text=__A, return_tensors="np")
        lowerCamelCase : List[Any] = tokenizer(__A, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def _snake_case(self):
        # text + images together yields input_ids, attention_mask and
        # pixel_values; calling with no input raises.
        lowerCamelCase : Any = self.get_image_processor()
        lowerCamelCase : Any = self.get_tokenizer()

        lowerCamelCase : int = OwlViTProcessor(tokenizer=__A, image_processor=__A)

        lowerCamelCase : Optional[Any] = "lower newer"
        lowerCamelCase : Dict = self.prepare_image_inputs()

        lowerCamelCase : Any = processor(text=__A, images=__A)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(__A):
            processor()

    def _snake_case(self):
        # A flat list of text queries is padded to the model's max query
        # length (16) in a single batch.
        lowerCamelCase : Any = "google/owlvit-base-patch32"
        lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A)

        lowerCamelCase : Tuple = ["cat", "nasa badge"]
        lowerCamelCase : str = processor(text=__A)

        lowerCamelCase : Union[str, Any] = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(__A):
            processor()

    def _snake_case(self):
        # Nested (per-image) text queries are flattened to
        # batch_size * max_queries rows.
        lowerCamelCase : str = "google/owlvit-base-patch32"
        lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A)

        lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
        lowerCamelCase : int = processor(text=__A)

        lowerCamelCase : Tuple = 16
        lowerCamelCase : Any = len(__A)
        lowerCamelCase : Optional[Any] = max([len(__A) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(__A):
            processor()

    def _snake_case(self):
        # Exact token ids produced for two short queries against the real
        # google/owlvit-base-patch32 vocabulary.
        lowerCamelCase : Dict = "google/owlvit-base-patch32"
        lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A)

        lowerCamelCase : List[Any] = ["cat", "nasa badge"]
        lowerCamelCase : Optional[Any] = processor(text=__A)

        lowerCamelCase : int = 16
        lowerCamelCase : List[str] = inputs["input_ids"]
        lowerCamelCase : int = [
            [4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def _snake_case(self):
        # images + query_images yields query_pixel_values and pixel_values.
        lowerCamelCase : Any = self.get_image_processor()
        lowerCamelCase : List[str] = self.get_tokenizer()

        lowerCamelCase : str = OwlViTProcessor(tokenizer=__A, image_processor=__A)

        lowerCamelCase : Dict = self.prepare_image_inputs()
        lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()

        lowerCamelCase : Any = processor(images=__A, query_images=__A)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(__A):
            processor()

    def _snake_case(self):
        # processor.batch_decode forwards to tokenizer.batch_decode.
        lowerCamelCase : Optional[Any] = self.get_image_processor()
        lowerCamelCase : Optional[int] = self.get_tokenizer()

        lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A, image_processor=__A)

        lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        lowerCamelCase : List[Any] = processor.batch_decode(__A)
        lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A)

        self.assertListEqual(__A, __A)
283
0
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( _snake_case : int ): for param in module.parameters(): lowerCAmelCase : Optional[int] = False def _snake_case ( ): lowerCAmelCase : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCAmelCase : Any = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def _snake_case ( _snake_case : Dict ): lowerCAmelCase : Optional[int] = plt.imshow(_snake_case ) fig.axes.get_xaxis().set_visible(_snake_case ) fig.axes.get_yaxis().set_visible(_snake_case ) plt.show() def _snake_case ( ): lowerCAmelCase : List[str] = datetime.now() lowerCAmelCase : Union[str, Any] = current_time.strftime('''%H:%M:%S''' ) return timestamp
60
# Unit tests for the BioGPT tokenizer (Moses-style BPE).
#
# NOTE(review): identifier names here look machine-obfuscated: the mixin base
# `UpperCamelCase` is never defined (presumably TokenizerTesterMixin, which is
# imported above), the class attributes are both bound to `__A`, every method
# is named `_snake_case` (later defs shadow earlier ones), and locals bound to
# `lowerCamelCase` are read back under their original names (`tokenizer`,
# `tokens`, `text`, ...). Restore the original identifiers before running;
# comments describe the apparent intent only.
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class UpperCAmelCase_(UpperCamelCase, unittest.TestCase):
    '''BPE tokenization, id conversion, and special-token tests for BioGPT.'''

    # Tokenizer class under test; second flag presumably `test_rust_tokenizer = False`.
    __A : List[Any] = BioGptTokenizer
    __A : Optional[int] = False

    def _snake_case(self):
        # setUp: write a tiny vocab + merges file into the mixin's temp dir.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        lowerCamelCase : str = dict(zip(__A, range(len(__A))))
        lowerCamelCase : Dict = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        lowerCamelCase : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        lowerCamelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(__A))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(__A))

    def _snake_case(self, __A):
        # get_input_output_texts: identical input/expected strings for the mixin.
        lowerCamelCase : Dict = "lower newer"
        lowerCamelCase : Union[str, Any] = "lower newer"
        return input_text, output_text

    def _snake_case(self):
        # "lower" splits into the BPE pieces "low" + "er</w>", and token ids
        # (including <unk>) round-trip through convert_tokens_to_ids.
        lowerCamelCase : List[str] = BioGptTokenizer(self.vocab_file, self.merges_file)

        lowerCamelCase : Optional[int] = "lower"
        lowerCamelCase : Any = ["low", "er</w>"]
        lowerCamelCase : List[str] = tokenizer.tokenize(__A)
        self.assertListEqual(__A, __A)

        lowerCamelCase : Union[str, Any] = tokens + ["<unk>"]
        lowerCamelCase : List[str] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A), __A)

    @slow
    def _snake_case(self):
        # With the real microsoft/biogpt checkpoint, build_inputs_with
        # _special_tokens prepends </s> (id 2) to each sequence.
        lowerCamelCase : List[str] = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        lowerCamelCase : Optional[int] = tokenizer.encode("sequence builders", add_special_tokens=__A)
        lowerCamelCase : Tuple = tokenizer.encode("multi-sequence build", add_special_tokens=__A)

        lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__A)
        lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__A, __A)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
283
0